""" app.py — Face verification gate + chat console Overview - Uses notebook-produced artifacts (models/gallery_mean.npy, labels.json, threshold.json) as the identity gallery. These were generated offline (e.g., in Colab) with your preferred face embedding model. At runtime we avoid installing heavy packages on Windows. - Provides: - "/" : Upload form (name + photo) and verification gate - "/verify": Face verification endpoint - "/chat" : Simple chat UI powered by Groq API - "/api/*" : Chat and speech-to-text helpers """ from __future__ import annotations import os import time import ssl import smtplib import html import json from email.message import EmailMessage from flask import ( Flask, request, redirect, make_response, jsonify, send_from_directory, ) from werkzeug.utils import secure_filename # Lightweight runtime deps (no heavy model required on Windows) import cv2 import numpy as np import requests # ============================ # Application configuration # ============================ TITLE = "Face Verify Gate" BACKGROUND_IMG = "https://i.pinimg.com/originals/f6/7a/18/f67a1897acd0eb4c8824f214d4e48f9e.gif" # Flask and uploads APP_SECRET = os.getenv("APP_SECRET", "dev-secret") UPLOAD_DIR = os.getenv("UPLOAD_DIR", "uploads") os.makedirs(UPLOAD_DIR, exist_ok=True) # Optional email alerts (leave empty to disable) ALERT_EMAIL_TO = os.getenv("ALERT_EMAIL_TO", "") ALERT_EMAIL_FROM = os.getenv("ALERT_EMAIL_FROM", "") SMTP_HOST = os.getenv("SMTP_HOST", "smtp.gmail.com") SMTP_PORT = int(os.getenv("SMTP_PORT", "465")) SMTP_USER = os.getenv("SMTP_USER", "") SMTP_PASS = os.getenv("SMTP_PASS", "") # Groq API (demo key shown; use your own secret in production) GROQ_API_KEY = "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" GROQ_MODEL_CHAT = os.getenv("GROQ_MODEL_CHAT", "llama-3.1-8b-instant") GROQ_MODEL_STT = os.getenv("GROQ_MODEL_STT", "whisper-large-v3") # Echo mode short-circuits Groq for quick local testing ECHO_MODE = False # ============================ # 
# ============================
# Model artifacts (from notebook)
# ============================
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
MODELS_DIR = os.path.join(BASE_DIR, "models")
GALLERY_NPY = os.path.join(MODELS_DIR, "gallery_mean.npy")  # shape: (N, D), float32
LABELS_JSON = os.path.join(MODELS_DIR, "labels.json")       # list[str], len N
THRESH_JSON = os.path.join(MODELS_DIR, "threshold.json")    # {"cosine_threshold": float}

# In-memory state (populated lazily by bootstrap_artifacts)
G: np.ndarray | None = None          # (N, D) gallery templates, assumed L2-normalized
labels: list[str] | None = None      # identity names aligned with rows of G
COSINE_SIM_THRESHOLD: float = 0.65   # similarity threshold; higher = stricter

# ============================
# Face detector (OpenCV Haar)
# - Portable and good enough to crop the largest face region.
# - May later be swapped for a stronger detector.
# ============================
HAAR_PATH = cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
FACE_DETECTOR = cv2.CascadeClassifier(HAAR_PATH)

# ============================
# Flask app
# ============================
app = Flask(__name__)
app.config["SECRET_KEY"] = APP_SECRET
app.config["MAX_CONTENT_LENGTH"] = 16 * 1024 * 1024  # 16 MB upload cap


# ============================
# Utilities
# ============================
def send_alert_email(subject: str, body: str) -> None:
    """
    Send a plain-text alert email via SMTP_SSL.

    Silently skips (with a warning log) when SMTP credentials or the
    recipient address are not configured, so alerts are strictly opt-in.

    Raises whatever smtplib raises on connection/auth failure; callers
    that treat alerts as best-effort wrap this in try/except.
    """
    if not (SMTP_USER and SMTP_PASS and ALERT_EMAIL_TO):
        app.logger.warning("Email not configured; skipping alert.")
        return
    msg = EmailMessage()
    msg["Subject"] = subject
    msg["From"] = ALERT_EMAIL_FROM or SMTP_USER
    msg["To"] = ALERT_EMAIL_TO
    msg.set_content(body)
    ctx = ssl.create_default_context()
    with smtplib.SMTP_SSL(SMTP_HOST, SMTP_PORT, context=ctx) as s:
        s.login(SMTP_USER, SMTP_PASS)
        s.send_message(msg)
    app.logger.info("Alert email sent.")


def save_upload(file_storage, prefix: str = "file") -> str:
    """
    Persist an uploaded file to UPLOAD_DIR with a prefixed, timestamped,
    sanitized filename. Returns the saved filesystem path.

    FIX: ``prefix`` was previously accepted but never used; it is now part
    of the stored filename so audit files are distinguishable by source.
    """
    filename = f"{prefix}_{int(time.time())}_{secure_filename(file_storage.filename)}"
    path = os.path.join(UPLOAD_DIR, filename)
    file_storage.save(path)
    return path


def cosine_distance(a: np.ndarray, b: np.ndarray) -> float:
    """
    Cosine distance between vectors a and b in [0, 2].

    0 = identical direction, 1 = orthogonal, 2 = opposite. We typically
    operate in [0, 1] when vectors are non-negative. The 1e-12 epsilon
    guards against division by zero for degenerate (all-zero) vectors.
    """
    return 1.0 - float(np.dot(a, b) / ((np.linalg.norm(a) * np.linalg.norm(b)) + 1e-12))


# ============================
# Query "embedding" placeholder
# - Keeps the end-to-end pipeline working on Windows without heavy installs.
# - Replace `embed_query_vector` later with the same model used in the notebook.
# ============================
def detect_and_crop_face(bgr: np.ndarray) -> np.ndarray | None:
    """
    Detect the largest frontal face in a BGR image and return the crop.

    Returns None if the input is None or no face is detected.
    """
    if bgr is None:
        return None
    gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
    faces = FACE_DETECTOR.detectMultiScale(
        gray, scaleFactor=1.2, minNeighbors=5, minSize=(60, 60)
    )
    if len(faces) == 0:
        return None
    # Select the largest bounding box (w * h) as the primary face
    x, y, w, h = max(faces, key=lambda f: f[2] * f[3])
    return bgr[y:y + h, x:x + w]


def embed_query_vector(bgr: np.ndarray) -> np.ndarray | None:
    """
    Build an L2-normalized float32 vector from the cropped face pixels.

    This is a temporary stand-in for a true neural embedding (the gallery
    in models/gallery_mean.npy must have been produced the same way for
    the comparison to be meaningful — TODO confirm against the notebook).
    Returns None if no face is found.
    """
    crop = detect_and_crop_face(bgr)
    if crop is None or crop.size == 0:
        return None
    # Standardize geometry to reduce variance
    face = cv2.resize(crop, (112, 112), interpolation=cv2.INTER_LINEAR)
    # Normalize to unit-length vector (L2); epsilon avoids divide-by-zero
    vec = face.astype("float32").ravel()
    vec = vec / (np.linalg.norm(vec) + 1e-12)
    return vec.astype("float32")


# ============================
# Artifact bootstrap
# ============================
def bootstrap_artifacts() -> None:
    """
    Load notebook-produced artifacts into module globals:
      - G: (N, D) gallery templates (assumed L2-normalized)
      - labels: list of identity strings with length N
      - COSINE_SIM_THRESHOLD: float from threshold.json

    Raises FileNotFoundError if any artifact is missing.

    FIX: the previous version declared ``global COSINE_DIST_THRESHOLD`` (a
    name that does not exist anywhere) while assigning COSINE_SIM_THRESHOLD,
    so the loaded threshold became a function local and the module-level
    value silently stayed at its 0.65 default.
    """
    global G, labels, COSINE_SIM_THRESHOLD

    for artifact in (GALLERY_NPY, LABELS_JSON, THRESH_JSON):
        if not os.path.exists(artifact):
            raise FileNotFoundError(f"Missing: {artifact}")

    G = np.load(GALLERY_NPY).astype("float32")
    with open(LABELS_JSON, "r", encoding="utf-8") as f:
        labels = json.load(f)
    with open(THRESH_JSON, "r", encoding="utf-8") as f:
        config = json.load(f)
    COSINE_SIM_THRESHOLD = float(config.get("cosine_threshold", COSINE_SIM_THRESHOLD))

    app.logger.info(
        "[bootstrap] gallery=%s labels=%d threshold=%.4f",
        None if G is None else tuple(G.shape),
        len(labels or []),
        COSINE_SIM_THRESHOLD,
    )


# ============================
# HTML Gate (upload form)
# ============================
def render_gate(status_msg: str = ""):
    """
    Render the landing page: name + photo form posting to /verify.

    ``status_msg`` is HTML-escaped before interpolation to prevent XSS.

    NOTE(review): the original inline HTML/CSS was lost when this file was
    extracted; this is a functional reconstruction preserving the visible
    copy and the form contract (multipart POST to /verify with fields
    'name' and 'photo'). Restore the original styling if available.
    """
    status_msg = html.escape(status_msg or "")
    html_page = f"""<!doctype html>
<html lang="en">
<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1">
  <title>{TITLE}</title>
  <style>
    body {{ margin: 0; min-height: 100vh; color: #e8f0ff;
            background: url('{BACKGROUND_IMG}') center / cover fixed;
            font-family: system-ui, sans-serif; text-align: center; }}
    .card {{ display: inline-block; margin-top: 10vh; padding: 2rem 3rem;
             background: rgba(10, 16, 30, 0.85); border-radius: 12px; }}
    input, button {{ margin: 0.4rem; padding: 0.5rem 0.8rem; }}
  </style>
</head>
<body>
  <div class="card">
    <p>Operational &bull; J.A.R.V.I.S. Security Core</p>
    <h1>J.A.R.V.I.S. verifies to protect what matters.</h1>
    <p>Adaptive identity verification for smooth and secure access.</p>
    <p><strong>{status_msg}</strong></p>
    <form method="post" action="/verify" enctype="multipart/form-data">
      <input type="text" name="name" placeholder="Your name" required>
      <input type="file" name="photo" accept="image/*" required>
      <button type="submit">Verify</button>
    </form>
  </div>
</body>
</html>"""
    resp = make_response(html_page)
    resp.headers["Content-Type"] = "text/html; charset=utf-8"
    return resp


@app.get("/")
def index():
    """Landing page with the verification gate."""
    return render_gate("")


# ============================
# Verification endpoint
# ============================
def verify_face_identity(user_name: str, image_bytes: bytes) -> dict:
    """
    Verify a claimed identity by comparing a query face to its gallery template.

    Returns a dict with keys:
      - ok: bool (accepted / rejected)
      - score: float (cosine *similarity*; higher is better) — only on full runs
      - threshold: float (decision boundary used) — only on full runs
      - reason: str | None (set on failure)

    FIX: the global declaration previously named the nonexistent
    COSINE_DIST_THRESHOLD; corrected to COSINE_SIM_THRESHOLD.
    """
    global G, labels, COSINE_SIM_THRESHOLD

    # Lazy-load notebook artifacts on first request
    if G is None or labels is None:
        try:
            bootstrap_artifacts()
        except Exception as e:
            return {"ok": False, "reason": f"bootstrap_failed: {e}"}

    # Identity must exist in labels (exact string match)
    try:
        idx = labels.index(user_name)
    except ValueError:
        return {"ok": False, "reason": "Name Not Found"}

    # Decode uploaded image from bytes
    arr = np.frombuffer(image_bytes, np.uint8)
    bgr = cv2.imdecode(arr, cv2.IMREAD_COLOR)
    if bgr is None:
        return {"ok": False, "reason": "Invalid image"}

    # Create a lightweight query vector (replace with real embedding later)
    q = embed_query_vector(bgr)
    if q is None or not np.isfinite(q).all():
        return {"ok": False, "reason": "No face detected"}

    # Lookup gallery template for this identity
    g = G[idx].astype("float32")

    # Cosine decision: distance in [0, 2] (0 = identical), reported as similarity
    dist = cosine_distance(q, g)
    sim = 1.0 - dist
    accepted = sim >= COSINE_SIM_THRESHOLD

    return {
        "ok": bool(accepted),
        "score": float(sim),  # similarity, not distance
        "threshold": float(COSINE_SIM_THRESHOLD),
        "reason": None if accepted else "Not within threshold",
    }


@app.post("/verify")
def verify():
    """
    Handle the gate form submission:
      - Save the upload for audit/debug
      - Run verification
      - On success: redirect to the chat console
      - On failure: re-render the gate (401) and best-effort email an alert

    NOTE(review): /chat itself does not check the cookie set here — anyone
    who knows the URL can open it. Confirm whether that is intentional.
    """
    name = (request.form.get("name") or "").strip()
    file = request.files.get("photo")
    if not name or not file or not file.filename.strip():
        return render_gate("Please enter a name and select an image."), 400

    saved_path = save_upload(file, prefix="photo")
    with open(saved_path, "rb") as f:
        image_bytes = f.read()

    result = verify_face_identity(name, image_bytes)

    if not result.get("ok"):
        # Alerting is best-effort: an SMTP failure must not mask the denial
        try:
            send_alert_email(
                f"[Access Denied] {name}",
                f"Denied file: {saved_path}\nReason: {result.get('reason')}",
            )
        except Exception as e:
            app.logger.error("Email error: %s", e)
        return render_gate("Access denied."), 401

    # Minimal session continuity via cookie (readable by the chat page JS,
    # hence httponly=False)
    resp = redirect("/chat", code=302)
    resp.set_cookie("user", name, httponly=False, samesite="Lax")
    return resp


# ============================
# Chat UI (post-verification)
# ============================
# NOTE(review): the original console HTML/JS was lost in extraction; this is
# a functional reconstruction wired to the real endpoints (/api/chat).
CHAT_HTML = """<!doctype html>
<html lang="en">
<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1">
  <title>J.A.R.V.I.S. Console</title>
  <style>
    body { margin: 0; background: #0a101e; color: #e8f0ff;
           font-family: system-ui, sans-serif; }
    main { max-width: 720px; margin: 2rem auto; padding: 0 1rem; }
    #log { min-height: 50vh; border: 1px solid #2a3a5a; border-radius: 8px;
           padding: 1rem; overflow-y: auto; }
    #log p { margin: 0.3rem 0; }
    form { display: flex; gap: 0.5rem; margin-top: 1rem; }
    #msg { flex: 1; padding: 0.5rem; }
  </style>
</head>
<body>
<main>
  <h1>J.A.R.V.I.S. Console</h1>
  <div id="log"></div>
  <form id="f">
    <input id="msg" autocomplete="off" placeholder="Ask J.A.R.V.I.S...">
    <button type="submit">Send</button>
  </form>
</main>
<script>
  const log = document.getElementById('log');
  function add(who, text) {
    const p = document.createElement('p');
    p.textContent = who + ': ' + text;
    log.appendChild(p);
    log.scrollTop = log.scrollHeight;
  }
  document.getElementById('f').addEventListener('submit', async (e) => {
    e.preventDefault();
    const box = document.getElementById('msg');
    const text = box.value.trim();
    if (!text) return;
    add('you', text);
    box.value = '';
    try {
      const r = await fetch('/api/chat', {
        method: 'POST',
        headers: {'Content-Type': 'application/json'},
        body: JSON.stringify({message: text})
      });
      const j = await r.json();
      add('jarvis', j.reply || j.error || '(no reply)');
    } catch (err) {
      add('jarvis', '(network error) ' + err);
    }
  });
</script>
</body>
</html>"""


@app.get("/chat")
def chat_page():
    """Return the chat console HTML."""
    resp = make_response(CHAT_HTML)
    resp.headers["Content-Type"] = "text/html; charset=utf-8"
    return resp


# ============================
# API: Chat (Groq)
# ============================
SYSTEM_PROMPT = "You are J.A.R.V.I.S., a helpful, concise assistant. Keep answers short and practical."


def call_groq_chat(messages: list[dict]) -> str:
    """
    Call Groq's Chat Completions API and return the assistant message text.

    ``messages`` follows the OpenAI-style [{"role": ..., "content": ...}]
    schema. Set ECHO_MODE=True to bypass the API for local testing.
    Raises requests.HTTPError on a non-2xx response.
    """
    if ECHO_MODE:
        return "Echo: " + messages[-1]["content"]
    url = "https://api.groq.com/openai/v1/chat/completions"
    headers = {"Authorization": f"Bearer {GROQ_API_KEY}", "Content-Type": "application/json"}
    data = {"model": GROQ_MODEL_CHAT, "messages": messages, "temperature": 0.3}
    r = requests.post(url, headers=headers, json=data, timeout=90)
    r.raise_for_status()
    j = r.json()
    return j["choices"][0]["message"]["content"].strip()


@app.post("/api/chat")
def api_chat():
    """
    Chat endpoint used by the UI. Accepts JSON {message: string};
    returns {reply: string} or {error: string}.
    """
    try:
        payload = request.get_json(force=True, silent=True) or {}
        user_msg = (payload.get("message") or "").strip()
        if not user_msg:
            return jsonify(error="empty message"), 400
        messages = [
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": user_msg},
        ]
        reply = call_groq_chat(messages)
        return jsonify(reply=reply)
    except Exception as e:
        app.logger.exception("Chat error")
        return jsonify(error=str(e)), 500


# ============================
# API: Speech-to-Text (Groq Whisper)
# ============================
@app.post("/api/speech_to_text")
def api_speech_to_text():
    """
    Accepts multipart/form-data with an 'audio' file (webm/ogg/mp4).
    Returns JSON {text: string} on success, {error: string} otherwise.
    """
    f = request.files.get("audio")
    if not f:
        return jsonify(error="no audio"), 400

    filename = f.filename or "voice.webm"
    lower = filename.lower()
    # Pick a MIME type Groq accepts based on the extension; default to webm
    if lower.endswith(".ogg"):
        mime = "audio/ogg"
    elif lower.endswith(".mp4") or lower.endswith(".m4a"):
        mime = "audio/mp4"
    else:
        mime = "audio/webm"

    # FIX: the stored name previously contained a literal "(unknown)"
    # placeholder; use the sanitized client filename instead so saved
    # clips are traceable and the name is filesystem-safe.
    path = os.path.join(UPLOAD_DIR, f"voice_{int(time.time())}_{secure_filename(filename)}")
    f.save(path)

    try:
        url = "https://api.groq.com/openai/v1/audio/transcriptions"
        headers = {"Authorization": f"Bearer {GROQ_API_KEY}"}
        with open(path, "rb") as fp:
            files = {
                "file": (os.path.basename(path), fp, mime),
                "model": (None, GROQ_MODEL_STT),
            }
            r = requests.post(url, headers=headers, files=files, timeout=180)
        if r.status_code >= 400:
            return jsonify(error=f"groq stt {r.status_code}: {r.text[:200]}"), 500
        j = r.json()
        text = j.get("text") or j.get("transcript") or ""
        if not text:
            return jsonify(error="no text from STT"), 500
        return jsonify(text=text)
    except Exception as e:
        app.logger.exception("STT error")
        return jsonify(error=str(e)), 500


# ============================
# Static uploads (debug convenience)
# ============================
# FIX: the route was registered as "/uploads/" with no URL variable, so Flask
# invoked the view without the required `fname` argument and every request
# failed with a 500. The <path:fname> converter supplies it; traversal is
# prevented by send_from_directory itself.
@app.get("/uploads/<path:fname>")
def get_upload(fname: str):
    """Serve saved uploads for manual inspection/debugging."""
    return send_from_directory(UPLOAD_DIR, fname)


# ============================
# Entrypoint
# ============================
if __name__ == "__main__":
    print(
        "Groq chat:", GROQ_MODEL_CHAT,
        "| STT:", GROQ_MODEL_STT,
        "| ECHO_MODE:", ECHO_MODE,
        "| Key set:", bool(GROQ_API_KEY),
    )
    try:
        # Preload artifacts once (non-fatal if not present yet; will retry on /verify)
        bootstrap_artifacts()
    except Exception as e:
        print(f"[WARN] Bootstrap will retry on first verify: {e}")
    app.run(host="127.0.0.1", port=5000, debug=True)