"""UPIF-Demo — server.py

Secure AI gateway demo: FastAPI backend serving a React frontend,
with deterministic input/output guarding around a local LLM.
"""
import os
import uvicorn
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
# 1. Import Core Security Library (hard requirement — refuse to start without it)
try:
    from upif import guard
    print("✅ UPIF Security Core Loaded Successfully.")
except ImportError as e:
    print(f"❌ CRITICAL: Unknown Security Context. {e}")
    # `exit()` is a site-module convenience that is not guaranteed to exist
    # (e.g. under `python -S`); SystemExit is the reliable way to abort with
    # status 1.
    raise SystemExit(1)

# 2. Import Local LLM Loader (soft requirement — server can still start,
# generation calls will fail gracefully)
try:
    import model_loader
    print("✅ Local LLM Loader Ready.")
except ImportError as e:
    print(f"⚠️ Warning: LLM Loader not found. {e}")
    # Bind the name so later references raise a catchable AttributeError
    # instead of an unexpected NameError.
    model_loader = None
# FastAPI application instance; the title appears in the auto-generated
# OpenAPI docs UI.
app = FastAPI(title="Nexus Corp | Secure AI Gateway")
# Wide-open CORS: any origin, method, and header are accepted.
# NOTE(review): acceptable for a local demo only — tighten allow_origins
# before any real deployment.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
class AnalyzeRequest(BaseModel):
    """Request body for POST /api/analyze."""

    # Raw user prompt; passed through upif.guard before reaching the LLM.
    prompt: str
@app.get("/api/health")
async def health_check():
    """Liveness probe: report gateway mode and security status."""
    payload = {
        "status": "online",
        "mode": "LIMITLESS_LOCAL_CPU",
        "security": "ACTIVE",
    }
    return payload
@app.post("/api/analyze")
async def analyze(req: AnalyzeRequest):
    """Run a prompt through the input guard, the local LLM, and the output guard.

    Returns a dict with the (possibly redacted) model output, an audit trail
    of processing steps, and a classification of BLOCKED or PROCESSED.
    """
    audit = []

    # --- PHASE 1: INPUT SECURITY (deterministic upif/guard.py scan) ---
    audit.append("UPIF: Scanning input for injection/policy violations...")
    sanitized = guard.process_input(req.prompt)
    was_modified = sanitized != req.prompt
    if was_modified:
        audit.append("UPIF: 🚨 THREAT DETECTED. Prompt modified/blocked.")
    # A refusal phrase in the sanitized prompt marks a hard block:
    # bail out immediately — nothing reaches the LLM.
    if was_modified and "I cannot" in sanitized:
        return {
            "output": sanitized,
            "logs": audit,
            "classification": "BLOCKED",
        }

    # --- PHASE 2: CONTEXT RETRIEVAL (RAG) ---
    # Demo shortcut: static context injection stands in for a live
    # vector-store (Chroma) lookup.
    context = """
CONFIDENTIAL CONTEXT:
- Project Zenith Budget: $4.2M (Vendor: StealthLabs).
- CEO Bonus: $350,000.
- Prod Server IP: 192.168.1.102. Staging: 10.0.8.44.
"""

    # --- PHASE 3: LOCAL LLM GENERATION (Limitless) ---
    audit.append("LLM: Generating response on Local CPU (Llama-3-8B)...")
    try:
        model_reply = model_loader.generate_response(
            prompt=sanitized,
            system_prompt=f"You are a helpful assistant for Nexus Corp. Use this context if relevant: {context}",
        )
    except Exception as err:  # boundary: surface engine failures in the audit trail
        audit.append(f"LLM Error: {str(err)}")
        model_reply = "Error: LLM Engine Failed."

    # --- PHASE 4: OUTPUT SECURITY (deterministic PII / leak scan) ---
    audit.append("UPIF: Scanning output for PII/Data Leaks...")
    redacted = guard.process_output(model_reply)
    if redacted != model_reply:
        audit.append("UPIF: 🛡️ DATA LEAK PREVENTED. Redacting sensitive info.")

    return {
        "output": redacted,
        "logs": audit,
        "classification": "PROCESSED",
    }
# Serve the built React frontend from this process when a build exists.
frontend_path = os.path.join(os.path.dirname(__file__), "web_client", "dist")
if os.path.exists(frontend_path):
    static_site = StaticFiles(directory=frontend_path, html=True)
    app.mount("/", static_site, name="static")
if __name__ == "__main__":
    # PORT may be overridden by the environment (e.g. container runtime).
    port = int(os.environ.get("PORT", 8000))
    # Pre-load the model so the first request doesn't pay the load cost.
    # Guarded: the model_loader import above is allowed to fail with only a
    # warning, so an unconditional call here would crash startup with a
    # NameError despite that "soft" failure.
    if globals().get("model_loader"):
        print("⏳ Pre-loading model...")
        model_loader.get_model()
    else:
        print("⚠️ Skipping model pre-load: model_loader unavailable.")
    uvicorn.run(app, host="0.0.0.0", port=port)