# NOTE: the lines below preserve web-capture residue from the Hugging Face blob
# header this file was copied from (not part of the program):
#   author "nothingworry" — commit c509b44 ("working Tenant ID") — raw / history / blame — 4.65 kB
# =============================================================
# File: backend/api/routes/agent.py
# =============================================================
from fastapi import APIRouter
from pydantic import BaseModel
import os
import sys
from pathlib import Path
# Add backend to path for imports: walk three levels up from this file
# (routes -> api -> backend) so the absolute `api.*` imports below resolve
# even when the app is launched from outside the backend package root.
backend_dir = Path(__file__).parent.parent.parent
sys.path.insert(0, str(backend_dir))
from api.services.agent_orchestrator import AgentOrchestrator
from api.models.agent import AgentRequest, AgentResponse
router = APIRouter()

# One orchestrator instance is created at import time and shared by every
# route in this module. The three MCP service URLs and the LLM backend are
# read from environment variables, falling back to local-development defaults.
orchestrator = AgentOrchestrator(
    rag_mcp_url=os.getenv("RAG_MCP_URL", "http://localhost:8001"),
    web_mcp_url=os.getenv("WEB_MCP_URL", "http://localhost:8002"),
    admin_mcp_url=os.getenv("ADMIN_MCP_URL", "http://localhost:8003"),
    llm_backend=os.getenv("LLM_BACKEND", "ollama")
)
class ChatRequest(BaseModel):
    """Incoming chat payload accepted by the /message, /debug and /plan routes.

    Fields mirror the orchestrator's AgentRequest and are forwarded to it
    one-to-one by the handlers below.
    """
    # Tenant the conversation belongs to.
    tenant_id: str
    # Optional end-user identifier; None when the caller is anonymous.
    user_id: str | None = None
    # The user's chat message text.
    message: str
    # Prior conversation turns as raw dicts; defaults to an empty history.
    # (Mutable default is safe here: pydantic copies defaults per instance.)
    conversation_history: list[dict] = []
    # Sampling temperature forwarded to the orchestrator (default 0.0).
    temperature: float = 0.0
@router.post("/message", response_model=AgentResponse)
async def agent_chat(req: ChatRequest):
    """Forward a chat message to the agent orchestrator and return its response.

    The transport-level ChatRequest maps field-for-field onto the
    orchestrator's AgentRequest model.
    """
    payload = {
        "tenant_id": req.tenant_id,
        "user_id": req.user_id,
        "message": req.message,
        "conversation_history": req.conversation_history,
        "temperature": req.temperature,
    }
    return await orchestrator.handle(AgentRequest(**payload))
@router.post("/debug")
async def agent_debug(req: ChatRequest):
    """
    Returns detailed debugging information about agent reasoning.
    Includes intent classification, tool selection, reasoning trace, and tool traces.
    """
    response = await orchestrator.handle(AgentRequest(
        tenant_id=req.tenant_id,
        user_id=req.user_id,
        message=req.message,
        conversation_history=req.conversation_history,
        temperature=req.temperature,
    ))
    trace = response.reasoning_trace

    def first_step(name):
        # First trace entry whose "step" field equals `name`, else None.
        for entry in trace:
            if entry.get("step") == name:
                return entry
        return None

    # Truncate long answers so the debug payload stays small.
    full_text = response.text
    preview = full_text[:500] + "..." if len(full_text) > 500 else full_text

    return {
        "request": {
            "tenant_id": req.tenant_id,
            "user_id": req.user_id,
            "message": req.message[:200],
            "temperature": req.temperature,
        },
        "response": {
            "text": preview,
            "decision": response.decision.dict() if response.decision else None,
            "tool_traces": response.tool_traces,
            "reasoning_trace": trace,
        },
        "debug_info": {
            # NOTE(review): assumes the second trace entry carries the intent
            # classification — confirm against AgentOrchestrator.handle.
            "intent": trace[1].get("intent") if len(trace) > 1 else None,
            "tool_selection": first_step("tool_selection"),
            "tool_scores": first_step("tool_scoring"),
            "redflag_check": first_step("redflag_check"),
            "total_steps": len(trace),
        },
    }
@router.post("/plan")
async def agent_plan(req: ChatRequest):
    """
    Returns only the agent's planning output (tool selection decision).
    Useful for understanding what tools the agent would use without executing them.
    """
    # Imported lazily so the planning services are only loaded when this
    # route is actually hit. (The redundant function-local `import os` was
    # removed: `os` is unused here and already imported at module top.)
    from ..services.intent_classifier import IntentClassifier
    from ..services.tool_selector import ToolSelector
    from ..services.tool_scoring import ToolScoringService

    # Create minimal orchestrator components for planning only, reusing the
    # shared orchestrator's LLM client rather than opening a new backend.
    llm = orchestrator.llm
    intent_classifier = IntentClassifier(llm_client=llm)
    tool_selector = ToolSelector(llm_client=llm)
    tool_scorer = ToolScoringService()

    # Classify the user's intent from the raw message.
    intent = await intent_classifier.classify(req.message)

    # Pre-fetch RAG results for context. This is best-effort: planning still
    # works with an empty context if the RAG MCP service is unreachable.
    rag_results = []
    try:
        rag_prefetch = await orchestrator.mcp.call_rag(req.tenant_id, req.message)
        if isinstance(rag_prefetch, dict):
            # Different RAG backends expose hits under different keys.
            rag_results = rag_prefetch.get("results") or rag_prefetch.get("hits") or []
    except Exception:
        pass  # deliberate: a prefetch failure must not fail the /plan endpoint

    # Score candidate tools, then let the selector make the final decision.
    tool_scores = tool_scorer.score(req.message, intent, rag_results)
    ctx = {
        "tenant_id": req.tenant_id,
        "rag_results": rag_results,
        "tool_scores": tool_scores,
    }
    decision = await tool_selector.select(intent, req.message, ctx)

    return {
        "tenant_id": req.tenant_id,
        "message": req.message,
        "intent": intent,
        "tool_scores": tool_scores,
        "plan": decision.dict(),
        "steps": decision.tool_input.get("steps", []) if decision.tool_input else [],
        "reason": decision.reason,
    }