Spaces:
Sleeping
Sleeping
Commit
·
20a1017
1
Parent(s):
9a99098
add services in backend API folder
Browse files- .gitignore +1 -0
- backend/api/main.py +16 -0
- backend/api/services/intent_classifier.py +26 -0
- backend/api/services/llm_client.py +25 -0
- backend/api/services/prompt_builder.py +66 -0
- backend/api/services/redflag_detector.py +54 -0
- backend/api/services/tool_selector.py +27 -0
- backend/tests/test_intent.py +47 -0
- requirements.txt +3 -0
- start.bat +88 -0
.gitignore
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
venv/
|
backend/api/main.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

# FastAPI entry point for the backend API.
app = FastAPI()

# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# rejected by browsers under the CORS spec (a wildcard origin cannot be used
# with credentials) — confirm the intended origins and tighten for production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


@app.get("/health")
def health():
    """Liveness probe: reports service status and API version."""
    return {"status": "ok", "version": "1.0.0"}
|
backend/api/services/intent_classifier.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
class IntentClassifier:
    """Keyword-based classifier mapping a user message to an intent label.

    Matching is case-insensitive substring containment, checked in priority
    order: "rag", then "web", then "admin"; anything else is "general".
    """

    def __init__(self):
        # Keywords suggesting the internal knowledge base (RAG) should be used.
        self.rag_keywords = [
            "internal", "document", "policy", "report", "knowledge",
            "tenant", "kb", "file", "pdf", "doc", "summarize"
        ]

        # Keywords suggesting an external web search.
        self.web_keywords = [
            "news", "latest", "search", "google", "web",
            "internet", "what is happening", "external"
        ]

        # Keywords suggesting a sensitive / administrative request.
        self.admin_keywords = [
            "delete", "remove", "salary", "confidential",
            "admin", "shutdown", "disable", "breach"
        ]

    def classify(self, message: str) -> str:
        """Return the intent for *message*: "rag", "web", "admin", or "general".

        Args:
            message: Raw user message; matching is case-insensitive.
        """
        # Lowercase once instead of once per branch (the original recomputed
        # message.lower() in every `any(...)` check).
        lowered = message.lower()
        if any(keyword in lowered for keyword in self.rag_keywords):
            return "rag"
        if any(keyword in lowered for keyword in self.web_keywords):
            return "web"
        if any(keyword in lowered for keyword in self.admin_keywords):
            return "admin"
        return "general"
|
backend/api/services/llm_client.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
|
| 2 |
+
|
| 3 |
+
class LLMClient:
    """
    Uses a LOCAL Llama model via Ollama.

    Sends non-streaming /api/generate requests to the local Ollama HTTP
    endpoint and returns the text of the model's reply.
    """

    def __init__(self, model="llama3", timeout: float = 120.0):
        # Model name as registered in Ollama.
        self.model = model
        # Ollama's default local endpoint.
        self.url = "http://localhost:11434/api/generate"
        # Per-request timeout in seconds. Without one, requests.post blocks
        # forever if the Ollama server is unresponsive.
        self.timeout = timeout

    def generate(self, prompt: str) -> str:
        """Return the model's completion for *prompt*, or an error string.

        Errors (connection failures, timeouts, bad JSON) are reported
        in-band as a string rather than raised, so callers always get text.
        """
        payload = {
            "model": self.model,
            "prompt": prompt,
            "stream": False
        }

        try:
            response = requests.post(self.url, json=payload, timeout=self.timeout)
            data = response.json()
            return data.get("response", "")
        except Exception as e:
            return f"⚠️ LOCAL LLM ERROR: {str(e)}"
|
backend/api/services/prompt_builder.py
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
class PromptBuilder:
|
| 2 |
+
"""
|
| 3 |
+
Builds the final prompt sent to the LLM.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
def __init__(self):
|
| 7 |
+
pass
|
| 8 |
+
|
| 9 |
+
def build(
|
| 10 |
+
self,
|
| 11 |
+
user_message: str,
|
| 12 |
+
tool: str,
|
| 13 |
+
rag_results: list[str] = None,
|
| 14 |
+
web_results: list[dict] = None,
|
| 15 |
+
redflag_info: dict = None,
|
| 16 |
+
tenant_id: str = None
|
| 17 |
+
) -> str:
|
| 18 |
+
|
| 19 |
+
# 1. Base system prompt
|
| 20 |
+
system_prompt = (
|
| 21 |
+
"You are VariaAI, an enterprise-grade MCP agent. "
|
| 22 |
+
"Always follow tenant isolation: only use data belonging to the tenant. "
|
| 23 |
+
"Be concise, correct, and safe.\n\n"
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
# 2. Insert admin red-flag context (if triggered)
|
| 27 |
+
if tool == "admin":
|
| 28 |
+
system_prompt += (
|
| 29 |
+
"⚠️ SECURITY NOTICE:\n"
|
| 30 |
+
"The user request appears sensitive or unsafe.\n"
|
| 31 |
+
"Provide a safe response, avoid executing harmful instructions.\n\n"
|
| 32 |
+
)
|
| 33 |
+
|
| 34 |
+
# 3. Insert RAG context
|
| 35 |
+
if tool == "rag" and rag_results:
|
| 36 |
+
system_prompt += "📄 Relevant Knowledge Base Documents:\n"
|
| 37 |
+
for i, chunk in enumerate(rag_results, start=1):
|
| 38 |
+
system_prompt += f"({i}) {chunk}\n"
|
| 39 |
+
system_prompt += "\n"
|
| 40 |
+
|
| 41 |
+
# 4. Insert Web Search results
|
| 42 |
+
if tool == "web" and web_results:
|
| 43 |
+
system_prompt += "🌐 Web Search Results:\n"
|
| 44 |
+
for i, item in enumerate(web_results, start=1):
|
| 45 |
+
title = item.get("title")
|
| 46 |
+
snippet = item.get("snippet")
|
| 47 |
+
system_prompt += f"({i}) {title}\n {snippet}\n"
|
| 48 |
+
system_prompt += "\n"
|
| 49 |
+
|
| 50 |
+
# 5. Add tenant metadata (optional)
|
| 51 |
+
if tenant_id:
|
| 52 |
+
system_prompt += f"Tenant ID: {tenant_id}\n\n"
|
| 53 |
+
|
| 54 |
+
# 6. Insert user query
|
| 55 |
+
system_prompt += "🗣️ User Message:\n"
|
| 56 |
+
system_prompt += user_message + "\n\n"
|
| 57 |
+
|
| 58 |
+
# 7. Instructions for the model
|
| 59 |
+
system_prompt += (
|
| 60 |
+
"🎯 Your Task:\n"
|
| 61 |
+
"- Answer using the context above.\n"
|
| 62 |
+
"- If context is missing, say so.\n"
|
| 63 |
+
"- Do not hallucinate facts.\n"
|
| 64 |
+
)
|
| 65 |
+
|
| 66 |
+
return system_prompt
|
backend/api/services/redflag_detector.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
|
| 3 |
+
class RedFlagDetector:
|
| 4 |
+
def __init__(self):
|
| 5 |
+
# Built-in system red flags
|
| 6 |
+
self.core_flags = [
|
| 7 |
+
"delete all data",
|
| 8 |
+
"wipe database",
|
| 9 |
+
"salary",
|
| 10 |
+
"confidential",
|
| 11 |
+
"password",
|
| 12 |
+
"secret",
|
| 13 |
+
"credential",
|
| 14 |
+
"token",
|
| 15 |
+
"ssh key",
|
| 16 |
+
"api key"
|
| 17 |
+
]
|
| 18 |
+
|
| 19 |
+
# Regex patterns for sensitive info
|
| 20 |
+
self.regex_patterns = {
|
| 21 |
+
"email": r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-z]{2,}",
|
| 22 |
+
"credit_card": r"\b(?:\d[ -]*?){13,16}\b",
|
| 23 |
+
"ssn": r"\b\d{3}-\d{2}-\d{4}\b",
|
| 24 |
+
"api_key": r"(?i)(apikey|api_key|token)[=:]\s?[A-Za-z0-9-_]{10,}"
|
| 25 |
+
}
|
| 26 |
+
|
| 27 |
+
def check(self, message: str, tenant_rules: list[str] = None) -> dict:
|
| 28 |
+
"""
|
| 29 |
+
Returns a dictionary describing any red-flag detected.
|
| 30 |
+
"""
|
| 31 |
+
|
| 32 |
+
message_lower = message.lower()
|
| 33 |
+
flags_detected = []
|
| 34 |
+
|
| 35 |
+
# 1. Built-in system flags
|
| 36 |
+
for keyword in self.core_flags:
|
| 37 |
+
if keyword in message_lower:
|
| 38 |
+
flags_detected.append(f"SystemFlag: {keyword}")
|
| 39 |
+
|
| 40 |
+
# 2. Tenant-defined rules
|
| 41 |
+
if tenant_rules:
|
| 42 |
+
for rule in tenant_rules:
|
| 43 |
+
if rule.lower() in message_lower:
|
| 44 |
+
flags_detected.append(f"TenantFlag: {rule}")
|
| 45 |
+
|
| 46 |
+
# 3. Regex detection
|
| 47 |
+
for name, pattern in self.regex_patterns.items():
|
| 48 |
+
if re.search(pattern, message):
|
| 49 |
+
flags_detected.append(f"RegexMatch: {name}")
|
| 50 |
+
|
| 51 |
+
return {
|
| 52 |
+
"is_redflag": len(flags_detected) > 0,
|
| 53 |
+
"matches": flags_detected
|
| 54 |
+
}
|
backend/api/services/tool_selector.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
class ToolSelector:
    """
    Decides which MCP tool (if any) should be used by the agent.
    """

    def select_tool(self, intent: str, redflag: dict) -> str:
        """Pick the tool for this request.

        Returns one of:
        - "admin"
        - "rag"
        - "web"
        - "none"
        """
        # A detected red-flag overrides the intent entirely.
        if redflag.get("is_redflag", False):
            return "admin"

        # Tool-backed intents pass straight through; every other intent
        # (e.g. "general") requires no tool at all.
        intent_to_tool = {"rag": "rag", "web": "web"}
        return intent_to_tool.get(intent, "none")
|
backend/tests/test_intent.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
from pathlib import Path

# Add backend directory to Python path so `api.services.*` resolves when this
# file is run directly from backend/tests/.
backend_dir = Path(__file__).parent.parent
sys.path.insert(0, str(backend_dir))

from api.services.intent_classifier import IntentClassifier
from api.services.llm_client import LLMClient
from api.services.redflag_detector import RedFlagDetector
from api.services.tool_selector import ToolSelector
from api.services.prompt_builder import PromptBuilder

# NOTE(review): this is a manual smoke script — it only prints results and
# makes no assertions, so it can never fail under a test runner. Consider
# converting the prints into asserts (the label before each colon documents
# the expected value).

clf = IntentClassifier()
detector = RedFlagDetector()
selector = ToolSelector()
builder = PromptBuilder()
llm = LLMClient()

# Intent classification: label before the colon is the expected intent.
print("RAG:", clf.classify("summarize internal policy"))
print("WEB:", clf.classify("latest news about ai"))
print("ADMIN:", clf.classify("delete all data"))
print("GENERAL:", clf.classify("hi how are you"))


# Red-flag detection on a mix of flagged and benign inputs.
print(detector.check("My email is test@gmail.com"))
print(detector.check("delete all data now"))
print(detector.check("confidential salary report"))
print(detector.check("hello world"))


# Tool selection: a red-flag forces "admin" regardless of intent.
print("admin:", selector.select_tool("web", {"is_redflag": True}))
print("rag:", selector.select_tool("rag", {"is_redflag": False}))
print("web:", selector.select_tool("web", {"is_redflag": False}))
print("none:", selector.select_tool("general", {"is_redflag": False}))


# Prompt assembly for a RAG request with two context chunks.
prompt = builder.build(
    user_message="summarize this",
    tool="rag",
    rag_results=["Document text part 1", "Document text part 2"],
    tenant_id="tenant123"
)

print(prompt)

# Live call — requires a local Ollama server to be running.
print(llm.generate("Hello Llama!"))
|
requirements.txt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi
|
| 2 |
+
uvicorn
|
| 3 |
+
requests
|
start.bat
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
@echo off
REM ============================================================
REM Launcher for the IntegraChat FastAPI backend (Windows).
REM Run from the project root: verifies prerequisites, activates
REM a venv if present, installs missing deps, then starts uvicorn.
REM ============================================================
echo ========================================
echo Starting IntegraChat Backend Server
echo ========================================
echo.

REM Get the directory where the batch file is located
cd /d "%~dp0"

REM Navigate to backend directory (fail fast if run from the wrong place)
if not exist "backend" (
    echo ERROR: backend directory not found!
    echo Please make sure you're running this from the project root.
    pause
    exit /b 1
)

cd backend

REM Check if Python is available
python --version >nul 2>&1
if errorlevel 1 (
    echo ERROR: Python is not installed or not in PATH!
    echo Please install Python 3.10+ and add it to your PATH.
    pause
    exit /b 1
)

echo Python version:
python --version
echo.

REM Check if virtual environment exists and activate it
if exist "venv\Scripts\activate.bat" (
    echo Activating virtual environment...
    call venv\Scripts\activate.bat
    echo.
)

REM Check if dependencies are installed (fastapi import acts as a probe)
echo Checking dependencies...
python -c "import fastapi" >nul 2>&1
if errorlevel 1 (
    echo FastAPI not found. Installing dependencies...
    REM requirements.txt normally lives at the project root; fall back to
    REM a local copy, then to installing the two core packages directly.
    if exist "..\requirements.txt" (
        pip install -r ..\requirements.txt
    ) else if exist "requirements.txt" (
        pip install -r requirements.txt
    ) else (
        echo Installing FastAPI and Uvicorn...
        pip install fastapi uvicorn
    )
    echo.
)

REM Check if .env file exists (warn only; server may still start)
if not exist ".env" (
    echo Warning: .env file not found!
    echo Please create .env file from .env.example if needed.
    echo.
)

REM Start the FastAPI server
echo ========================================
echo Starting FastAPI server...
echo Server will be available at:
echo http://localhost:8000
echo http://127.0.0.1:8000
echo ========================================
echo.
echo Press Ctrl+C to stop the server
echo.

REM --reload watches for code changes; 0.0.0.0 listens on all interfaces.
python -m uvicorn api.main:app --reload --host 0.0.0.0 --port 8000

REM Reached only after uvicorn exits; nonzero means startup failed.
if errorlevel 1 (
    echo.
    echo ERROR: Failed to start the server!
    echo.
    echo Common issues:
    echo 1. Make sure you're in the backend directory
    echo 2. Check if api/main.py exists
    echo 3. Verify all dependencies are installed
    echo.
)

pause
|
| 88 |
+
|