CodeSAAT committed on
Commit
f2276c1
·
verified ·
1 Parent(s): 29da26e

Upload 8 files

Browse files
Files changed (8) hide show
  1. Procfile +1 -0
  2. app.cpython-312.pyc +0 -0
  3. app.py +101 -0
  4. detector.cpython-312.pyc +0 -0
  5. detector.py +233 -0
  6. index.html +500 -0
  7. kiro.json +29 -0
  8. python.config +12 -0
Procfile ADDED
@@ -0,0 +1 @@
 
 
1
# Use the platform-assigned $PORT (Heroku/containers set it); fall back to 8000 locally.
web: cd backend && uvicorn app:app --host 0.0.0.0 --port ${PORT:-8000}
app.cpython-312.pyc ADDED
Binary file (5.07 kB). View file
 
app.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Vocal Guard - Deepfake Audio Detector
3
+ FastAPI Backend | AWS-Compatible
4
+ """
5
import io
import logging
import os

import numpy as np
import uvicorn
from fastapi import FastAPI, File, Form, HTTPException, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse
from fastapi.staticfiles import StaticFiles

from detector import VocalGuardDetector
16
+
17
# ── Module-level wiring: logging, app instance, CORS, detector, static assets ──
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = FastAPI(
    title="Vocal Guard API",
    description="Real-time deepfake audio detection using Mel-spectrogram analysis",
    version="1.0.0"
)

# Wide-open CORS: the frontend may be served from a different origin in dev.
# NOTE(review): allow_origins=["*"] with allow_credentials=True is permissive —
# consider restricting origins before production hardening.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# One shared detector instance; the ML model is loaded once at import time.
detector = VocalGuardDetector()

# Serve frontend static files from the sibling "frontend" directory.
frontend_path = os.path.join(os.path.dirname(__file__), "..", "frontend")
app.mount("/static", StaticFiles(directory=frontend_path), name="static")
39
+
40
+
41
@app.get("/")
async def root():
    """Serve the single-page frontend entry point."""
    index_file = os.path.join(frontend_path, "index.html")
    return FileResponse(index_file)
44
+
45
+
46
@app.get("/health")
async def health_check():
    """Liveness probe: report service and model identity for monitors."""
    payload = {"status": "healthy", "model": "VocalGuard v3.0", "ready": True}
    return payload
49
+
50
+
51
@app.post("/analyze")
async def analyze_audio(file: UploadFile = File(...), source: str = Form("file")):
    """
    Analyze audio file for deepfake detection.
    Accepts WAV/WebM audio, returns prediction with confidence.

    The optional ``source`` form field ("mic" or "file", default "file")
    selects the detector calibration. The frontend sends source=mic for
    live-microphone chunks.
    """
    try:
        # Log (but don't reject) unexpected content types — browsers report
        # a variety of MIME strings for recorded audio.
        if not file.content_type or not any(
            ct in file.content_type for ct in ["audio", "octet-stream", "webm", "wav", "ogg"]
        ):
            logger.warning(f"Received content type: {file.content_type}")

        audio_bytes = await file.read()

        if len(audio_bytes) < 100:
            raise HTTPException(status_code=400, detail="Audio too short or empty")

        logger.info(f"Processing audio chunk: {len(audio_bytes)} bytes")

        # BUG FIX: the frontend posts source=mic for live chunks, but this flag
        # was previously ignored, so mic audio was scored through the detector's
        # file-upload (inverted-label) path. Honor it here.
        result = detector.predict(audio_bytes, is_mic=(source == "mic"))
        return result

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Analysis error: {e}")
        raise HTTPException(status_code=500, detail=f"Analysis failed: {str(e)}")
78
+
79
+
80
@app.post("/analyze-stream")
async def analyze_stream(file: UploadFile = File(...), source: str = Form("mic")):
    """
    Lightweight endpoint optimized for real-time streaming analysis.
    Returns faster results with less feature detail.

    Streaming callers are live-microphone sources, so the detector's mic
    calibration is used unless source="file" is sent explicitly.
    """
    try:
        audio_bytes = await file.read()
        if len(audio_bytes) < 50:
            # Too little data to decode — return a neutral placeholder.
            return {"label": "unknown", "confidence": 0, "processing_ms": 0}

        # BUG FIX: previously is_mic was never set, so streamed mic audio went
        # through the inverted-label file path.
        result = detector.predict_fast(audio_bytes, is_mic=(source == "mic"))
        return result

    except Exception as e:
        # Best-effort endpoint: never surface a 500 to the streaming client.
        logger.error(f"Stream analysis error: {e}")
        return {"label": "unknown", "confidence": 0.5, "processing_ms": 0}
97
+
98
+
99
if __name__ == "__main__":
    # Respect the platform-assigned PORT (e.g. Heroku/containers); default 8000.
    port = int(os.getenv("PORT", "8000"))
    uvicorn.run("app:app", host="0.0.0.0", port=port, reload=False)
detector.cpython-312.pyc ADDED
Binary file (13.9 kB). View file
 
detector.py ADDED
@@ -0,0 +1,233 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ VocalGuard
3
+ File uploads: inverted MelodyMachine labels (real=AI, fake=human)
4
+ Mic input: normal labels (fake=AI, real=human) + conservative threshold
5
+ """
6
+ import numpy as np
7
+ import librosa, soundfile as sf
8
+ import time, io, logging, warnings
9
+ from typing import Dict, Any, Tuple, List
10
+
11
+ warnings.filterwarnings("ignore")
12
+ logger = logging.getLogger(__name__)
13
+
14
+ try:
15
+ import torch
16
+ from transformers import AutoModelForAudioClassification, AutoFeatureExtractor
17
+ TORCH_OK = True
18
+ except ImportError:
19
+ TORCH_OK = False
20
+
21
+
22
class VocalGuardDetector:
    """Deepfake-audio detector built around a HuggingFace audio classifier.

    Label handling is deliberately asymmetric (observed empirically with the
    MelodyMachine checkpoint, per the module docstring):

      * file uploads: model labels are inverted ('real' score = AI prob,
        'fake' score = human prob);
      * mic input: labels are normal ('fake' = AI), plus a conservative
        threshold so uncertain mic audio leans toward "human".
    """

    SR = 16000          # working sample rate (Hz) for all inference
    MIN_DURATION = 0.5  # shortest analyzable clip, in seconds

    def __init__(self):
        """Initialize and attempt to load the local classifier."""
        logger.info("VocalGuard v7.2 initializing...")
        self.local_model = None      # AutoModelForAudioClassification, or None
        self.local_extractor = None  # matching AutoFeatureExtractor, or None
        self._try_load_local()
        logger.info("VocalGuard v7.2 ready.")

    def _try_load_local(self):
        """Best-effort model load; on failure predict() reports a warning result."""
        if not TORCH_OK:
            logger.warning("torch not available")
            return
        try:
            model_id = "MelodyMachine/Deepfake-audio-detection-V2"
            self.local_extractor = AutoFeatureExtractor.from_pretrained(model_id)
            self.local_model = AutoModelForAudioClassification.from_pretrained(model_id)
            self.local_model.eval()
            logger.info(f"Model loaded: {self.local_model.config.id2label}")
        except Exception as e:
            logger.error(f"Model load failed: {e}")

    # ── AUDIO LOADING ─────────────────────────────────────────────────────────
    def _load(self, audio_bytes: bytes) -> np.ndarray:
        """Decode arbitrary audio bytes to mono float32 at self.SR.

        Tries soundfile, then librosa, then a raw int16 PCM fallback
        (assumed 16 kHz). Raises ValueError when every decoder fails.
        """
        for fn in [
            lambda b: sf.read(io.BytesIO(b), always_2d=False),
            lambda b: librosa.load(io.BytesIO(b), sr=None, mono=True),
            lambda b: (np.frombuffer(b, dtype=np.int16).astype(np.float32) / 32768.0, 16000),
        ]:
            try:
                y, sr = fn(audio_bytes)
                if hasattr(y, 'ndim') and y.ndim > 1:
                    y = y.mean(axis=1)  # downmix multi-channel to mono
                if len(y) > 100:        # skip decoders that produced near-empty output
                    if sr != self.SR:
                        y = librosa.resample(y, orig_sr=sr, target_sr=self.SR)
                    return y.astype(np.float32)
            except Exception:
                continue
        raise ValueError("Cannot decode audio")

    # ── SHARED MODEL PLUMBING (factored out of the two inference paths) ──────
    def _model_probs(self, y: np.ndarray) -> Tuple[np.ndarray, Dict[int, str]]:
        """Run the classifier on y; return (softmax probabilities, id2label map)."""
        inputs = self.local_extractor(
            y, sampling_rate=self.SR,
            return_tensors="pt", padding=True
        )
        with torch.no_grad():
            logits = self.local_model(**inputs).logits
        probs = torch.softmax(logits, dim=-1)[0].numpy()
        return probs, self.local_model.config.id2label

    @staticmethod
    def _prob_for(probs: np.ndarray, id2label: Dict[int, str], substr: str):
        """Probability of the first label whose name contains substr, else None."""
        for idx, lbl in id2label.items():
            if substr in lbl.lower():
                return float(probs[idx])
        return None

    # ── FILE UPLOAD INFERENCE ─────────────────────────────────────────────────
    def _infer_file(self, y: np.ndarray) -> Tuple[float, str]:
        """AI probability for uploaded files (INVERTED label mapping).

        Empirically for this checkpoint on file uploads:
        'real' score = AI probability, 'fake' score = human probability.
        """
        min_len = self.SR * 3  # pad to >= 3s of audio before inference
        if len(y) < min_len:
            y = np.pad(y, (0, min_len - len(y)))

        probs, id2label = self._model_probs(y)
        logger.info(f"File probs: { {id2label[i]: round(float(probs[i]), 4) for i in range(len(probs))} }")

        # Inverted mapping; 'fake' takes precedence (same outcome as the
        # original two-pass scan where the 'fake' pass overwrote the 'real' one).
        p_fake = self._prob_for(probs, id2label, "fake")
        p_real = self._prob_for(probs, id2label, "real")
        if p_fake is not None:
            ai_prob = 1.0 - p_fake      # fake = human → invert
        elif p_real is not None:
            ai_prob = p_real            # real = AI
        else:
            ai_prob = float(probs[0])   # unexpected label set: fall back to class 0

        logger.info(f"File AI prob: {ai_prob:.4f}")
        return float(np.clip(ai_prob, 0.01, 0.99)), "model_file"

    # ── MIC INFERENCE ────────────────────────────────────────────────────────
    def _infer_mic(self, y: np.ndarray) -> Tuple[float, str]:
        """AI probability for live mic audio (normal labels + conservative bias).

        'fake' = AI, 'real' = human; uncertain scores are compressed toward
        human to reduce false positives on noisy mic captures.
        """
        from scipy import signal as scipy_signal

        # High-pass filter (80 Hz) to remove room rumble / handling noise.
        sos = scipy_signal.butter(4, 80, 'hp', fs=self.SR, output='sos')
        y = scipy_signal.sosfilt(sos, y).astype(np.float32)

        # Peak-normalize; skip near-silence to avoid amplifying noise.
        peak = np.max(np.abs(y))
        if peak > 0.001:
            y /= peak

        min_len = self.SR * 4  # pad to >= 4s for mic chunks
        if len(y) < min_len:
            y = np.pad(y, (0, min_len - len(y)))

        probs, id2label = self._model_probs(y)
        logger.info(f"Mic probs: { {id2label[i]: round(float(probs[i]), 4) for i in range(len(probs))} }")

        # Normal mapping; 'real' takes precedence (mirrors the original scan order).
        p_real = self._prob_for(probs, id2label, "real")
        p_fake = self._prob_for(probs, id2label, "fake")
        if p_real is not None:
            ai_prob = 1.0 - p_real      # real = human → invert
        elif p_fake is not None:
            ai_prob = p_fake            # fake = AI
        else:
            ai_prob = float(probs[0])

        # Conservative: only strong detections (>0.70) keep their score on mic;
        # everything below is compressed toward "human".
        if ai_prob < 0.70:
            ai_prob = ai_prob * 0.45

        logger.info(f"Mic AI prob (after conservative threshold): {ai_prob:.4f}")
        return float(np.clip(ai_prob, 0.01, 0.99)), "model_mic"

    # ── MAIN PREDICT ──────────────────────────────────────────────────────────
    def predict(self, audio_bytes: bytes, is_mic: bool = False) -> Dict[str, Any]:
        """Full pipeline: decode → trim/validate → infer → package verdict.

        Args:
            audio_bytes: raw encoded audio (WAV/WebM/OGG, or raw int16 PCM).
            is_mic: True selects the mic-calibrated inference path.

        Returns:
            Result dict with label, confidence, probabilities and metadata;
            on validation/inference failure, an "unknown" result carrying a
            human-readable "warning" field (never raises to the caller
            beyond decode errors from _load).
        """
        t0 = time.time()

        y = self._load(audio_bytes)
        y, _ = librosa.effects.trim(y, top_db=25)  # strip leading/trailing silence

        # NOTE(review): MIN_DURATION is 0.5s but the message asks for 1s —
        # presumably intentional headroom in the user guidance.
        if len(y) < self.SR * self.MIN_DURATION:
            return self._err(t0, "Too short — speak for at least 1 second")
        peak = np.max(np.abs(y))
        if peak < 0.003:
            return self._err(t0, "Signal too quiet — check microphone")
        y /= (peak + 1e-10)
        dur = len(y) / self.SR

        if self.local_model is None:
            return self._err(t0, "Model not loaded — check torch/transformers installation")

        logger.info(f"Source: {'mic' if is_mic else 'file'} | duration: {dur:.1f}s")

        try:
            if is_mic:
                ai_prob, method = self._infer_mic(y)
            else:
                ai_prob, method = self._infer_file(y)
        except Exception as e:
            logger.error(f"Inference error: {e}")
            return self._err(t0, f"Detection failed: {str(e)[:80]}")

        ai_prob = float(np.clip(ai_prob, 0.01, 0.99))
        label = "AI Generated" if ai_prob >= 0.5 else "Human Voice"
        conf = ai_prob if ai_prob >= 0.5 else (1 - ai_prob)
        d = abs(ai_prob - 0.5)  # distance from decision boundary drives the tier
        tier = "High" if d > 0.28 else ("Medium" if d > 0.13 else "Low")

        logger.info(f"Final → {label} ({conf*100:.1f}%) via {method}")

        return {
            "label": label,
            "confidence": round(conf * 100, 1),
            "confidence_tier": tier,
            "ai_probability": round(ai_prob, 4),
            "human_probability": round(1 - ai_prob, 4),
            "duration_seconds": round(dur, 2),
            "processing_ms": int((time.time() - t0) * 1000),
            "detection_method": method,
            "feature_scores": {
                "AI Probability": round(ai_prob, 4),
                "Human Probability": round(1 - ai_prob, 4),
            },
            "key_indicators": self._indicators(ai_prob, method),
        }

    def predict_fast(self, audio_bytes: bytes, is_mic: bool = False) -> Dict[str, Any]:
        """Streaming alias: currently identical to predict()."""
        return self.predict(audio_bytes, is_mic=is_mic)

    def _err(self, t0: float, msg: str) -> Dict[str, Any]:
        """Neutral 'unknown' result carrying a warning message."""
        return {
            "label": "unknown", "confidence": 0,
            "ai_probability": 0.5, "human_probability": 0.5,
            "processing_ms": int((time.time() - t0) * 1000),
            "warning": msg, "feature_scores": {}, "key_indicators": []
        }

    def _indicators(self, ai_prob: float, method: str) -> List[str]:
        """Human-readable bullet points explaining the verdict."""
        out = []
        if method == "model_mic":
            out.append("🎙️ Live mic analysis — upload file for highest accuracy")
        else:
            out.append("🔬 ML model analysis on uploaded file")
        if ai_prob > 0.75:
            out.append("⚠️ Strong AI synthesis markers detected")
        elif ai_prob > 0.50:
            out.append("⚠️ Possible AI synthesis detected")
        elif ai_prob < 0.25:
            out.append("✅ Strong natural human speech markers")
        else:
            out.append("✅ Natural human speech markers present")
        return out
index.html ADDED
@@ -0,0 +1,500 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8" />
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0"/>
6
+ <title>Vocal Guard — Deepfake Audio Detector</title>
7
+ <style>
8
+ @import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700;800;900&display=swap');
9
+ :root {
10
+ --red: #E84B35; --red-light: #ff6b55; --bg: #F5F2EE;
11
+ --dark: #0F0F0F; --card: #FFFFFF; --border: rgba(0,0,0,0.08);
12
+ --green: #22C55E; --text-muted: #6B7280;
13
+ }
14
+ * { box-sizing: border-box; margin: 0; padding: 0; }
15
+ body { font-family: 'Inter', sans-serif; background: var(--bg); color: var(--dark); min-height: 100vh; }
16
+ header {
17
+ background: var(--dark); padding: 0 40px;
18
+ display: flex; align-items: center; justify-content: space-between;
19
+ height: 64px; position: sticky; top: 0; z-index: 100;
20
+ }
21
+ .logo { display: flex; align-items: center; gap: 10px; }
22
+ .logo-text { font-size: 16px; font-weight: 700; color: white; }
23
+ .logo-text span { color: var(--red); }
24
+ .hero { text-align: center; padding: 60px 20px 40px; }
25
+ .hero-tag {
26
+ display: inline-flex; align-items: center; gap: 6px;
27
+ background: rgba(232,75,53,0.08); color: var(--red);
28
+ padding: 5px 14px; border-radius: 20px; font-size: 12px; font-weight: 600;
29
+ margin-bottom: 20px; border: 1px solid rgba(232,75,53,0.15);
30
+ }
31
+ h1 { font-size: clamp(36px,6vw,72px); font-weight: 900; letter-spacing: -2px; line-height: 1; margin-bottom: 16px; }
32
+ h1 em { color: var(--red); font-style: normal; }
33
+ .hero-sub { font-size: 16px; color: var(--text-muted); max-width: 500px; margin: 0 auto 40px; line-height: 1.6; }
34
+ .detector-card {
35
+ max-width: 800px; margin: 0 auto 40px;
36
+ background: var(--card); border-radius: 24px;
37
+ border: 1px solid var(--border);
38
+ box-shadow: 0 4px 6px -1px rgba(0,0,0,0.05), 0 20px 60px -10px rgba(0,0,0,0.08);
39
+ overflow: hidden;
40
+ }
41
+ .card-top { background: var(--dark); padding: 32px; display: flex; flex-direction: column; align-items: center; gap: 24px; }
42
+ .visualizer-wrap { width: 100%; border-radius: 16px; overflow: hidden; background: rgba(255,255,255,0.03); border: 1px solid rgba(255,255,255,0.06); }
43
+ #waveCanvas { width: 100%; height: 80px; display: block; }
44
+ .mic-btn {
45
+ width: 80px; height: 80px; border-radius: 50%;
46
+ background: linear-gradient(135deg, var(--red), #c0392b);
47
+ border: none; cursor: pointer; display: flex; align-items: center; justify-content: center;
48
+ transition: all 0.2s; box-shadow: 0 0 0 0 rgba(232,75,53,0.4);
49
+ }
50
+ .mic-btn:hover { transform: scale(1.08); }
51
+ .mic-btn.recording { animation: mic-pulse 1.5s ease-in-out infinite; }
52
+ @keyframes mic-pulse { 0%,100%{box-shadow:0 0 0 0 rgba(232,75,53,0.5)} 50%{box-shadow:0 0 0 20px rgba(232,75,53,0)} }
53
+ .mic-btn svg { width: 28px; height: 28px; fill: white; }
54
+ .status-bar { display: flex; align-items: center; gap: 8px; font-size: 12px; font-weight: 500; color: rgba(255,255,255,0.5); }
55
+ .status-dot { width: 6px; height: 6px; border-radius: 50%; background: #6B7280; }
56
+ .status-dot.active { background: var(--red); animation: blink 1s infinite; }
57
+ @keyframes blink { 0%,100%{opacity:1} 50%{opacity:0.3} }
58
+ .result-panel { padding: 32px; }
59
+ .result-idle { text-align: center; padding: 40px 20px; color: var(--text-muted); }
60
+ .spinner { width: 40px; height: 40px; border-radius: 50%; border: 3px solid var(--border); border-top-color: var(--red); animation: spin 0.8s linear infinite; margin: 0 auto 16px; }
61
+ @keyframes spin { to { transform: rotate(360deg); } }
62
+ .analyzing-wrap { display: none; text-align: center; padding: 32px; }
63
+ .analyzing-wrap.visible { display: block; }
64
+ .verdict-wrap { display: none; }
65
+ .verdict-wrap.visible { display: block; }
66
+ .verdict-header { display: flex; align-items: center; gap: 16px; margin-bottom: 24px; }
67
+ .verdict-badge { flex-shrink: 0; padding: 10px 20px; border-radius: 12px; font-size: 15px; font-weight: 700; }
68
+ .verdict-badge.human { background: rgba(34,197,94,0.12); color: #16a34a; border: 1px solid rgba(34,197,94,0.2); }
69
+ .verdict-badge.ai { background: rgba(232,75,53,0.1); color: var(--red); border: 1px solid rgba(232,75,53,0.2); }
70
+ .verdict-badge.unknown { background: rgba(107,114,128,0.1); color: var(--text-muted); border: 1px solid var(--border); }
71
+ .verdict-label { font-size: 20px; font-weight: 800; letter-spacing: -0.5px; }
72
+ .verdict-conf { font-size: 13px; color: var(--text-muted); margin-top: 2px; }
73
+ .conf-bar-wrap { margin-bottom: 28px; }
74
+ .conf-bar-labels { display: flex; justify-content: space-between; margin-bottom: 6px; }
75
+ .conf-bar-labels span { font-size: 11px; font-weight: 600; color: var(--text-muted); text-transform: uppercase; }
76
+ .conf-bar-track { height: 10px; background: #f3f4f6; border-radius: 5px; overflow: hidden; position: relative; }
77
+ .conf-bar-fill { height: 100%; border-radius: 5px; transition: width 0.6s ease; background: linear-gradient(90deg, var(--green), #16a34a); }
78
+ .conf-bar-fill.ai { background: linear-gradient(90deg, var(--red), #c0392b); }
79
+ .conf-bar-marker { position: absolute; top: 0; left: 50%; width: 2px; height: 100%; background: rgba(0,0,0,0.2); }
80
+ .section-label { font-size: 11px; font-weight: 700; text-transform: uppercase; letter-spacing: 1px; color: var(--text-muted); margin-bottom: 12px; }
81
+ .feature-grid { display: grid; grid-template-columns: repeat(auto-fill, minmax(190px,1fr)); gap: 10px; margin-bottom: 24px; }
82
+ .feature-item { background: var(--bg); border-radius: 10px; padding: 12px 14px; border: 1px solid var(--border); }
83
+ .feature-name { font-size: 11px; font-weight: 600; color: var(--text-muted); margin-bottom: 6px; text-transform: uppercase; letter-spacing: 0.4px; }
84
+ .feature-bar-track { height: 4px; background: rgba(0,0,0,0.08); border-radius: 2px; overflow: hidden; }
85
+ .feature-bar-fill { height: 100%; border-radius: 2px; transition: width 0.4s ease; }
86
+ .feature-score { font-size: 12px; font-weight: 700; margin-top: 4px; }
87
+ .indicators { display: flex; flex-direction: column; gap: 6px; margin-bottom: 24px; }
88
+ .indicator-item { font-size: 13px; background: var(--bg); border-radius: 8px; padding: 8px 12px; border: 1px solid var(--border); }
89
+ .meta-row { display: flex; gap: 12px; flex-wrap: wrap; }
90
+ .meta-chip { background: var(--bg); border: 1px solid var(--border); border-radius: 8px; padding: 6px 12px; font-size: 11px; color: var(--text-muted); font-weight: 500; }
91
+ .meta-chip strong { color: var(--dark); }
92
+ .tabs { display: flex; gap: 4px; background: rgba(0,0,0,0.05); padding: 4px; border-radius: 12px; width: fit-content; }
93
+ .tab-btn { padding: 8px 20px; border-radius: 8px; border: none; font-size: 13px; font-weight: 600; cursor: pointer; transition: all 0.15s; background: transparent; color: var(--text-muted); }
94
+ .tab-btn.active { background: white; color: var(--dark); box-shadow: 0 1px 4px rgba(0,0,0,0.1); }
95
+ .tab-content { display: none; }
96
+ .tab-content.active { display: block; }
97
+ .upload-zone {
98
+ border: 2px dashed var(--border); border-radius: 16px; padding: 48px;
99
+ text-align: center; cursor: pointer; transition: all 0.2s;
100
+ }
101
+ .upload-zone:hover, .upload-zone.drag-over { border-color: var(--red); background: rgba(232,75,53,0.03); }
102
+ .upload-zone p { color: var(--text-muted); font-size: 14px; }
103
+ #fileInput { display: none; }
104
+ .confidence-tier { display: inline-block; padding: 2px 8px; border-radius: 6px; font-size: 10px; font-weight: 700; margin-left: 8px; text-transform: uppercase; }
105
+ .tier-high { background: rgba(34,197,94,0.15); color: #16a34a; }
106
+ .tier-medium { background: rgba(245,158,11,0.15); color: #b45309; }
107
+ .tier-low { background: rgba(107,114,128,0.15); color: #6B7280; }
108
+ .mic-note {
109
+ background: rgba(232,75,53,0.07);
110
+ border: 1px solid rgba(232,75,53,0.2);
111
+ border-radius: 10px;
112
+ padding: 10px 16px;
113
+ margin-bottom: 16px;
114
+ font-size: 13px;
115
+ color: #b94030;
116
+ font-weight: 500;
117
+ text-align: center;
118
+ }
119
+ .credit {
120
+ position: fixed; bottom: 16px; right: 20px;
121
+ font-size: 18px; font-weight: 600; color: #6B7280; letter-spacing: 0.3px;
122
+ z-index: 999;
123
+ }
124
+ .credit span { color: #E84B35; }
125
+ </style>
126
+ </head>
127
+ <body>
128
+
129
+ <header>
130
+ <div class="logo">
131
+ <span class="logo-text">VOCAL<span>GUARD</span></span>
132
+ </div>
133
+ </header>
134
+
135
+ <div class="hero">
136
+ <div class="hero-tag">AI-Powered Audio Forensics</div>
137
+ <h1>Detect <em>Deepfake</em><br>Audio Instantly</h1>
138
+ <p class="hero-sub">Real-time ML analysis catches synthetic voice artifacts invisible to the human ear.</p>
139
+ </div>
140
+
141
+ <div style="display:flex;justify-content:center;margin-bottom:16px">
142
+ <div class="tabs">
143
+ <button class="tab-btn active" onclick="switchTab('mic')">🎙️ Live Microphone</button>
144
+ <button class="tab-btn" onclick="switchTab('upload')">📁 Upload File</button>
145
+ </div>
146
+ </div>
147
+
148
+ <!-- MIC TAB -->
149
+ <div id="tab-mic" class="tab-content active">
150
+ <div class="detector-card">
151
+ <div class="card-top">
152
+ <div class="visualizer-wrap">
153
+ <canvas id="waveCanvas" height="80"></canvas>
154
+ </div>
155
+ <div style="display:flex;flex-direction:column;align-items:center;gap:8px">
156
+ <button class="mic-btn" id="micBtn" onclick="toggleRecording()">
157
+ <svg viewBox="0 0 24 24"><path d="M12 2a3 3 0 0 1 3 3v7a3 3 0 0 1-6 0V5a3 3 0 0 1 3-3z"/><path d="M19 10v2a7 7 0 0 1-14 0v-2"/><line x1="12" y1="19" x2="12" y2="23"/><line x1="8" y1="23" x2="16" y2="23"/></svg>
158
+ </button>
159
+ <div class="status-bar">
160
+ <div class="status-dot" id="statusDot"></div>
161
+ <span id="statusText">Click to start analysis</span>
162
+ </div>
163
+ </div>
164
+ </div>
165
+ <div class="result-panel">
166
+ <div class="mic-note">
167
+ 💡 For more accurate results, use the <strong>Upload File</strong> tab
168
+ </div>
169
+ <div class="result-idle" id="micIdle">
170
+ <p style="font-weight:600;color:#374151;margin-bottom:4px">Ready to Analyze</p>
171
+ <p style="font-size:13px">Click the microphone to begin real-time detection</p>
172
+ </div>
173
+ <div class="analyzing-wrap" id="micAnalyzing">
174
+ <div class="spinner"></div>
175
+ <p style="font-weight:600">Analyzing audio...</p>
176
+ <p style="font-size:13px;color:var(--text-muted);margin-top:4px">Running ML model inference</p>
177
+ </div>
178
+ <div class="verdict-wrap" id="micVerdict">
179
+ <div class="verdict-header">
180
+ <div class="verdict-badge" id="micBadge"></div>
181
+ <div>
182
+ <div class="verdict-label" id="micLabel"></div>
183
+ <div class="verdict-conf" id="micConf"></div>
184
+ </div>
185
+ </div>
186
+ <div class="conf-bar-wrap">
187
+ <div class="conf-bar-labels"><span>Human ✅</span><span>AI Synthetic ⚠️</span></div>
188
+ <div class="conf-bar-track">
189
+ <div class="conf-bar-marker"></div>
190
+ <div class="conf-bar-fill" id="micBar" style="width:50%"></div>
191
+ </div>
192
+ </div>
193
+ <div class="section-label">Detection Scores</div>
194
+ <div class="feature-grid" id="micGrid"></div>
195
+ <div id="micIndWrap" style="display:none">
196
+ <div class="section-label">Key Indicators</div>
197
+ <div class="indicators" id="micIndList"></div>
198
+ </div>
199
+ <div class="meta-row" id="micMeta"></div>
200
+ </div>
201
+ </div>
202
+ </div>
203
+ </div>
204
+
205
+ <!-- UPLOAD TAB -->
206
+ <div id="tab-upload" class="tab-content">
207
+ <div class="detector-card">
208
+ <div style="padding:32px">
209
+ <div class="upload-zone" id="uploadZone"
210
+ onclick="document.getElementById('fileInput').click()"
211
+ ondragover="event.preventDefault();this.classList.add('drag-over')"
212
+ ondragleave="this.classList.remove('drag-over')"
213
+ ondrop="handleDrop(event)">
214
+ <p style="font-size:32px;margin-bottom:12px">📁</p>
215
+ <p><strong>Click to upload</strong> or drag & drop</p>
216
+ <p style="font-size:12px;margin-top:6px">WAV, MP3, OGG, WEBM supported</p>
217
+ </div>
218
+ <input type="file" id="fileInput" accept="audio/*" onchange="handleFileUpload(event)"/>
219
+ </div>
220
+ <div class="result-panel" style="padding-top:0">
221
+ <div class="result-idle" id="uploadIdle">
222
+ <p style="font-weight:600;color:#374151;margin-bottom:4px">No File Selected</p>
223
+ <p style="font-size:13px">Upload an audio file to analyze</p>
224
+ </div>
225
+ <div class="analyzing-wrap" id="uploadAnalyzing">
226
+ <div class="spinner"></div>
227
+ <p style="font-weight:600">Analyzing file...</p>
228
+ </div>
229
+ <div class="verdict-wrap" id="uploadVerdict">
230
+ <div class="verdict-header">
231
+ <div class="verdict-badge" id="uploadBadge"></div>
232
+ <div>
233
+ <div class="verdict-label" id="uploadLabel"></div>
234
+ <div class="verdict-conf" id="uploadConf"></div>
235
+ </div>
236
+ </div>
237
+ <div class="conf-bar-wrap">
238
+ <div class="conf-bar-labels"><span>Human ✅</span><span>AI Synthetic ⚠️</span></div>
239
+ <div class="conf-bar-track">
240
+ <div class="conf-bar-marker"></div>
241
+ <div class="conf-bar-fill" id="uploadBar" style="width:50%"></div>
242
+ </div>
243
+ </div>
244
+ <div class="section-label">Detection Scores</div>
245
+ <div class="feature-grid" id="uploadGrid"></div>
246
+ <div id="uploadIndWrap" style="display:none">
247
+ <div class="section-label">Key Indicators</div>
248
+ <div class="indicators" id="uploadIndList"></div>
249
+ </div>
250
+ <div class="meta-row" id="uploadMeta"></div>
251
+ </div>
252
+ </div>
253
+ </div>
254
+ </div>
255
+
256
+ <div class="credit">Developed &amp; trained by <span>CODE SAAT</span></div>
257
+
258
+ <script>
259
const API = window.location.origin;  // backend base URL (same origin as the page)
let mediaRecorder = null;            // active MediaRecorder for the current chunk
let audioCtx = null;                 // WebAudio context backing the waveform visualizer
let analyser = null;                 // AnalyserNode feeding the canvas trace
let animFrame = null;                // requestAnimationFrame handle for the visualizer
let isRecording = false;             // true while a mic session is live
let recordInterval = null;           // timeout handle that rotates recording chunks
let chunks = [];                     // Blob parts for the chunk currently recording
let resultBuffer = [];               // recent results used for verdict smoothing
const SMOOTH_WINDOW = 3;             // number of results in the smoothing window
269
+
270
function switchTab(tab) {
  // Highlight the matching tab button (index 0 = mic, 1 = upload).
  const buttons = document.querySelectorAll('.tab-btn');
  buttons.forEach((btn, idx) => {
    const wantActive = (idx === 0 && tab === 'mic') || (idx === 1 && tab === 'upload');
    btn.classList.toggle('active', wantActive);
  });
  // Show the selected panel, hide the other.
  document.getElementById('tab-mic').classList.toggle('active', tab === 'mic');
  document.getElementById('tab-upload').classList.toggle('active', tab === 'upload');
  // Leaving the mic tab mid-recording ends the session.
  if (tab !== 'mic' && isRecording) stopRecording();
}
278
+
279
function startWaveform(stream) {
  // Attach a WebAudio analyser to the mic stream and continuously draw a
  // time-domain trace onto #waveCanvas.
  audioCtx = new AudioContext();
  analyser = audioCtx.createAnalyser();
  analyser.fftSize = 1024;
  audioCtx.createMediaStreamSource(stream).connect(analyser);
  const canvas = document.getElementById('waveCanvas');
  const ctx = canvas.getContext('2d');
  // Scale the backing store for high-DPI displays (CSS height is 80px).
  canvas.width = canvas.offsetWidth * window.devicePixelRatio;
  canvas.height = 80 * window.devicePixelRatio;
  ctx.scale(window.devicePixelRatio, window.devicePixelRatio);
  const buf = new Uint8Array(analyser.frequencyBinCount);
  function draw() {
    animFrame = requestAnimationFrame(draw);
    analyser.getByteTimeDomainData(buf);
    const w = canvas.width / window.devicePixelRatio, h = 80;
    ctx.clearRect(0, 0, w, h);
    ctx.beginPath();
    ctx.strokeStyle = '#E84B35';
    ctx.lineWidth = 1.5;
    ctx.shadowColor = 'rgba(232,75,53,0.5)';
    ctx.shadowBlur = 8;
    const sliceW = w / buf.length;
    let x = 0;
    for (let i = 0; i < buf.length; i++) {
      // Samples are bytes centered at 128; map 0..255 onto canvas height.
      const y2 = (buf[i] / 128) * h / 2;
      i === 0 ? ctx.moveTo(x, y2) : ctx.lineTo(x, y2);
      x += sliceW;
    }
    ctx.lineTo(w, h / 2);
    ctx.stroke();
  }
  draw();
}
312
+
313
function stopWaveform() {
  // Halt the animation loop, wipe the canvas, and release the audio context.
  if (animFrame) cancelAnimationFrame(animFrame);
  const cv = document.getElementById('waveCanvas');
  const c2d = cv.getContext('2d');
  c2d.clearRect(0, 0, cv.width, cv.height);
  if (audioCtx) {
    audioCtx.close();
    audioCtx = null;
  }
}
319
+
320
function toggleRecording() {
  // Flip between recording and idle states.
  if (isRecording) {
    stopRecording();
  } else {
    startRecording();
  }
}
323
+
324
async function startRecording() {
  // Request mic access, switch the UI into recording mode, and begin
  // recording in rotating 5-second chunks that are each sent for analysis.
  try {
    const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
    startWaveform(stream);
    isRecording = true;
    resultBuffer = [];
    document.getElementById('micBtn').classList.add('recording');
    document.getElementById('statusDot').className = 'status-dot active';
    document.getElementById('statusText').textContent = 'Recording & analyzing...';
    document.getElementById('micIdle').style.display = 'none';
    document.getElementById('micAnalyzing').classList.add('visible');

    // Chunk rotation: stop the recorder after 5s (triggering sendChunk via
    // onstop), then start a fresh recorder 300ms later so the verdict
    // updates continuously throughout the session.
    function startChunk() {
      if (!isRecording) return;
      chunks = [];
      const mime = getSupportedMime();
      mediaRecorder = new MediaRecorder(stream, mime ? { mimeType: mime } : {});
      mediaRecorder.ondataavailable = e => { if (e.data.size > 0) chunks.push(e.data); };
      mediaRecorder.onstop = () => sendChunk([...chunks], mediaRecorder.mimeType);
      mediaRecorder.start();
      recordInterval = setTimeout(() => {
        if (mediaRecorder && mediaRecorder.state === 'recording') {
          mediaRecorder.stop();
          setTimeout(startChunk, 300);
        }
      }, 5000);
    }
    startChunk();
  } catch (err) {
    // getUserMedia rejects when the user denies permission (or no device).
    alert('Microphone access denied. Please allow microphone permissions.');
  }
}
356
+
357
function stopRecording() {
  // Tear down the mic session and restore the idle UI.
  isRecording = false;
  resultBuffer = [];
  if (recordInterval) clearTimeout(recordInterval);
  if (mediaRecorder && mediaRecorder.state === 'recording') mediaRecorder.stop();
  stopWaveform();
  const micBtn = document.getElementById('micBtn');
  const statusDot = document.getElementById('statusDot');
  const statusText = document.getElementById('statusText');
  micBtn.classList.remove('recording');
  statusDot.className = 'status-dot';
  statusText.textContent = 'Click to start analysis';
  document.getElementById('micAnalyzing').classList.remove('visible');
}
368
+
369
function getSupportedMime() {
  // Probe preferred container/codec combos; '' lets the browser pick.
  const candidates = ['audio/webm;codecs=opus','audio/webm','audio/ogg;codecs=opus','audio/ogg'];
  for (const t of candidates) {
    if (MediaRecorder.isTypeSupported(t)) return t;
  }
  return '';
}
373
+
374
function smoothResult(newResult) {
    // Smooth the live mic verdict by averaging the last SMOOTH_WINDOW chunk
    // results with linear weights (newest chunk counts most), so the badge
    // does not flicker between consecutive 5-second chunks.
    //
    // BUGFIX: an ai_probability of 0 is a legitimate "certainly human" score;
    // the previous truthiness test (`!newResult.ai_probability`) dropped such
    // results from the smoothing buffer. Test for presence explicitly.
    if (typeof newResult.ai_probability !== 'number' || newResult.label === 'unknown') {
        return newResult;
    }
    resultBuffer.push(newResult);
    if (resultBuffer.length > SMOOTH_WINDOW) resultBuffer.shift();
    // A single sample has nothing to average against.
    if (resultBuffer.length === 1) return newResult;

    // Weights 1..k over the buffer; the most recent entry gets weight k.
    const weights = resultBuffer.map((_, i) => i + 1);
    const totalW = weights.reduce((a, b) => a + b, 0);
    const smoothed = resultBuffer.reduce((sum, r, i) => sum + r.ai_probability * weights[i], 0) / totalW;
    const label = smoothed >= 0.5 ? 'AI Generated' : 'Human Voice';
    const conf = smoothed >= 0.5 ? smoothed : (1 - smoothed);
    return {
        ...newResult,
        ai_probability: +smoothed.toFixed(4),
        human_probability: +(1 - smoothed).toFixed(4),
        label,
        confidence: +(conf * 100).toFixed(1),
    };
}
386
+
387
async function sendChunk(chunkData, mimeType) {
    // Ship one recorded mic chunk to the backend and render the smoothed
    // verdict into the mic panel. Failures are logged, never thrown.
    if (!chunkData.length) {
        return;
    }
    const blob = new Blob(chunkData, { type: mimeType || 'audio/webm' });
    // Fragments below ~20 kB are too short for a meaningful verdict; drop them.
    if (blob.size < 20000) {
        return;
    }

    const form = new FormData();
    form.append('file', blob, 'audio.webm');
    form.append('source', 'mic');

    try {
        const res = await fetch(`${API}/analyze`, { method: 'POST', body: form });
        if (!res.ok) {
            console.error('Server error:', res.status);
            return;
        }
        const payload = await res.json();
        const data = smoothResult(payload);
        document.getElementById('micAnalyzing').classList.remove('visible');
        renderVerdict(data, 'mic');
    } catch (err) {
        console.error('Fetch error:', err);
    }
}
405
+
406
function handleDrop(e) {
    // Drag-and-drop handler for the upload zone: analyze the first dropped file.
    e.preventDefault();
    document.getElementById('uploadZone').classList.remove('drag-over');
    const dropped = e.dataTransfer.files[0];
    if (dropped) {
        analyzeFile(dropped);
    }
}
412
+
413
function handleFileUpload(e) {
    // Change handler for the hidden <input type="file">.
    const chosen = e.target.files[0];
    if (chosen) {
        analyzeFile(chosen);
    }
}
417
+
418
async function analyzeFile(file) {
    // Upload a user-selected file to the backend and render its verdict in
    // the upload panel; on any failure, restore the idle state and alert.
    const byId = (id) => document.getElementById(id);
    byId('uploadIdle').style.display = 'none';
    byId('uploadVerdict').classList.remove('visible');
    byId('uploadAnalyzing').classList.add('visible');

    const form = new FormData();
    form.append('file', file, file.name);
    form.append('source', 'file');

    try {
        const res = await fetch(`${API}/analyze`, { method: 'POST', body: form });
        if (!res.ok) {
            alert(`Server error: ${res.status}`);
            byId('uploadAnalyzing').classList.remove('visible');
            return;
        }
        const data = await res.json();
        byId('uploadAnalyzing').classList.remove('visible');
        renderVerdict(data, 'upload');
    } catch (err) {
        console.error('Upload error:', err);
        byId('uploadAnalyzing').classList.remove('visible');
        byId('uploadIdle').style.display = 'block';
        alert('Connection failed. Is the server running at port 8000?');
    }
}
438
+
439
function renderVerdict(data, mode) {
    // Paint one analysis result into the mic or upload panel. `mode` selects
    // the element-id prefix shared by both panels ('mic…' / 'upload…').
    const p = mode === 'mic' ? 'mic' : 'upload';
    const byId = (id) => document.getElementById(p + id);
    const isAI = data.label === 'AI Generated';
    const isUnknown = !data.label || data.label === 'unknown' || data.label === 'listening...';

    // Non-trivial backend warnings replace the verdict entirely.
    if (data.warning && data.warning !== 'chunk_too_small') {
        byId('Label').textContent = data.warning;
        byId('Badge').className = 'verdict-badge unknown';
        byId('Badge').textContent = '❓ Unknown';
        byId('Conf').textContent = '';
        byId('Verdict').classList.add('visible');
        return;
    }
    // Too-small chunks are silently ignored; the previous verdict stays up.
    if (data.warning === 'chunk_too_small') return;

    byId('Badge').className = 'verdict-badge ' + (isAI ? 'ai' : isUnknown ? 'unknown' : 'human');
    byId('Badge').textContent = isAI ? '⚠️ AI Detected' : isUnknown ? '❓ Unknown' : '✅ Authentic';

    const tierClass = data.confidence_tier === 'High' ? 'tier-high' : data.confidence_tier === 'Medium' ? 'tier-medium' : 'tier-low';
    const tierBadge = data.confidence_tier ? `<span class="confidence-tier ${tierClass}">${data.confidence_tier} confidence</span>` : '';
    byId('Label').innerHTML = (data.label || 'Unknown') + tierBadge;
    byId('Conf').textContent = `Confidence: ${data.confidence || 0}% · ${data.processing_ms || 0}ms`;

    // BUGFIX: ai_probability of 0 is a legitimate "certainly human" score;
    // `data.ai_probability || 0.5` silently replaced it with 0.5. Test for
    // presence explicitly before falling back to the neutral midpoint.
    const aiProb = typeof data.ai_probability === 'number' ? data.ai_probability : 0.5;
    byId('Bar').style.width = (aiProb * 100) + '%';
    byId('Bar').className = 'conf-bar-fill ' + (isAI ? 'ai' : '');

    // PERF: build all feature rows first and assign innerHTML once —
    // `innerHTML +=` in a loop re-parses the entire grid on every iteration.
    const grid = byId('Grid');
    if (data.feature_scores) {
        grid.innerHTML = Object.entries(data.feature_scores).map(([key, val]) => {
            const pct = Math.round(val * 100);
            const color = val > 0.6 ? '#E84B35' : val > 0.4 ? '#F59E0B' : '#22C55E';
            return `<div class="feature-item">
            <div class="feature-name">${key}</div>
            <div class="feature-bar-track"><div class="feature-bar-fill" style="width:${pct}%;background:${color}"></div></div>
            <div class="feature-score" style="color:${color}">${pct}%</div>
          </div>`;
        }).join('');
    } else {
        grid.innerHTML = '';
    }

    const indWrap = byId('IndWrap');
    const indList = byId('IndList');
    if (data.key_indicators && data.key_indicators.length) {
        indWrap.style.display = 'block';
        // NOTE(review): indicator strings are injected as raw HTML — confirm
        // the backend never echoes user-controlled text here, or escape them.
        indList.innerHTML = data.key_indicators.map(i => `<div class="indicator-item">${i}</div>`).join('');
    } else {
        indWrap.style.display = 'none';
    }

    const chips = [];
    if (data.duration_seconds) chips.push(`Duration: <strong>${data.duration_seconds}s</strong>`);
    if (data.processing_ms) chips.push(`Processed: <strong>${data.processing_ms}ms</strong>`);
    // Presence check (not truthiness) so a 0% AI score still gets its chip.
    if (typeof data.ai_probability === 'number') chips.push(`AI Score: <strong>${Math.round(data.ai_probability * 100)}%</strong>`);
    byId('Meta').innerHTML = chips.map(c => `<div class="meta-chip">${c}</div>`).join('');

    byId('Verdict').classList.add('visible');
}
497
+ </script>
498
+
499
+ </body>
500
+ </html>
kiro.json ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "vocal-guard",
3
+ "displayName": "Vocal Guard — Deepfake Audio Detector",
4
+ "version": "1.0.0",
5
+ "runtime": "python3.11",
6
+ "entrypoint": "backend/app.py",
7
+ "port": 8000,
8
+ "build": {
9
+ "commands": [
10
+ "pip install -r requirements.txt"
11
+ ]
12
+ },
13
+ "start": {
14
+ "command": "uvicorn backend.app:app --host 0.0.0.0 --port 8000"
15
+ },
16
+ "aws": {
17
+ "region": "ap-south-1",
18
+ "services": {
19
+ "compute": "elastic-beanstalk",
20
+ "platform": "Python 3.11",
21
+ "tier": "WebServer",
22
+ "instanceType": "t3.small"
23
+ }
24
+ },
25
+ "environment": {
26
+ "PORT": "8000"
27
+ },
28
+ "healthCheck": "/health"
29
+ }
python.config ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ packages:
2
+ yum:
3
+ libsndfile-devel: []
4
+ ffmpeg: []
5
+
6
+ option_settings:
7
+ aws:elasticbeanstalk:application:environment:
8
+ PORT: "8000"
9
+ aws:elasticbeanstalk:container:python:
10
+ WSGIPath: "backend/app:app"
11
+ aws:elasticbeanstalk:environment:proxy:staticfiles:
12
+ /static: frontend