"""
Supervisor Agent Router.
Handles chat interactions with the HR Supervisor Agent.
Supports both regular and streaming responses.
=============================================================================
ENDPOINTS:
=============================================================================
WITH CONTEXT ENGINEERING (CompactingSupervisor wrapper):
- POST /chat : Batch response with automatic context compaction
- POST /chat/stream : Streaming with context compaction [HAS ERRORS - TODO FIX]
RAW SUPERVISOR (Direct agent access, no wrapper):
- POST /raw/chat : Batch response, direct supervisor agent
- POST /raw/chat/stream : Streaming, direct supervisor agent [HAS ERRORS - TODO FIX]
UTILITY:
- POST /new : Create new chat session
- GET /health : Health check
=============================================================================
NOTE: Both streaming endpoints (/chat/stream and /raw/chat/stream) have
known issues that need to be fixed. Use batch endpoints (/chat or /raw/chat)
for reliable operation.
=============================================================================
"""
import json
import uuid

from fastapi import APIRouter, HTTPException
from fastapi.responses import StreamingResponse
from langchain_core.messages import HumanMessage

from src.backend.api.schemas.supervisor_chat import ChatRequest, ChatResponse, NewChatResponse
from src.backend.context_eng import compacting_supervisor, count_tokens_for_messages
from src.backend.agents.supervisor.supervisor_v2 import supervisor_agent

router = APIRouter()


@router.post("/chat", response_model=ChatResponse)
async def chat(request: ChatRequest) -> ChatResponse:
    """
    Send a message to the HR Supervisor Agent and receive a response.

    Uses the CompactingSupervisor wrapper for automatic context management:
    when the token limit is exceeded, old messages are compacted/summarized.

    The agent can:
    - Query the candidate database
    - Screen CVs
    - Schedule calendar events
    - Send emails via Gmail

    Use the returned `thread_id` in subsequent requests to maintain
    conversation context.
    """
    # Generate a short thread ID unless the client supplied one
    thread_id = request.thread_id or str(uuid.uuid4())[:8]

    try:
        # Config for stateful (checkpointed) conversation
        config = {"configurable": {"thread_id": thread_id}}

        # Invoke the compacting supervisor wrapper
        response = compacting_supervisor.invoke(
            {"messages": [HumanMessage(content=request.message)]},
            config=config,
        )

        # Extract the final reply and count tokens across the full history
        final_message = response["messages"][-1]
        all_messages = response["messages"]
        token_count = count_tokens_for_messages(all_messages)

        return ChatResponse(
            response=final_message.content,
            thread_id=thread_id,
            token_count=token_count,
        )
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Agent execution failed: {str(e)}",
        )
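
# Example round trip for /chat (illustrative; field names match the
# ChatRequest/ChatResponse schemas used above, values are made up):
#   Request:  {"message": "Schedule an interview with Jane", "thread_id": "abc12345"}
#   Response: {"response": "...", "thread_id": "abc12345", "token_count": 1234}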
@router.post("/chat/stream")
async def chat_stream(request: ChatRequest):
"""
Stream a response from the HR Supervisor Agent using Server-Sent Events (SSE).
⚠️ WARNING: This endpoint has known issues and needs to be fixed.
Use /raw/chat/stream for reliable streaming, or /chat for batch requests.
Uses CompactingSupervisor wrapper for automatic context management.
Yields chunks as SSE events:
- event: token - A content token from the AI response
- event: done - Final message with metadata (token_count, thread_id)
- event: error - Error occurred
Use the returned `thread_id` in subsequent requests to maintain conversation context.
"""
thread_id = request.thread_id or str(uuid.uuid4())[:8]
def generate():
try:
config = {"configurable": {"thread_id": thread_id}}
for chunk in compacting_supervisor.stream(
{"messages": [HumanMessage(content=request.message)]},
config=config
):
if chunk["type"] == "token":
# SSE format: event type + data
yield f"event: token\ndata: {json.dumps({'content': chunk['content']})}\n\n"
elif chunk["type"] == "done":
yield f"event: done\ndata: {json.dumps({'thread_id': thread_id, 'token_count': chunk['token_count']})}\n\n"
elif chunk["type"] == "error":
yield f"event: error\ndata: {json.dumps({'error': chunk['content']})}\n\n"
except Exception as e:
yield f"event: error\ndata: {json.dumps({'error': str(e)})}\n\n"
return StreamingResponse(
generate(),
media_type="text/event-stream",
headers={
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"X-Accel-Buffering": "no", # Disable nginx buffering
}
)
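
# What a client sees on the wire for /chat/stream (illustrative):
#
#   event: token
#   data: {"content": "Hello"}
#
#   event: done
#   data: {"thread_id": "abc12345", "token_count": 1234}
#
# A minimal consumer sketch (assumes the httpx package; the URL/prefix are
# assumptions, not part of this module):
#
#   import httpx
#   with httpx.stream("POST", "http://localhost:8000/supervisor/chat/stream",
#                     json={"message": "Hi"}) as r:
#       for line in r.iter_lines():
#           print(line)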
@router.post("/new", response_model=NewChatResponse)
async def new_chat() -> NewChatResponse:
"""
Create a new chat session with a fresh thread ID.
Returns a new thread_id to use for subsequent chat requests.
"""
thread_id = str(uuid.uuid4())[:8]
return NewChatResponse(
thread_id=thread_id,
message="New chat session created. Use the thread_id for your conversations.",
)
@router.get("/health")
async def supervisor_health():
"""Health check for supervisor router."""
return {"status": "healthy", "service": "supervisor"}

# =============================================================================
# RAW SUPERVISOR ENDPOINTS (No CompactingSupervisor wrapper)
# =============================================================================

@router.post("/raw/chat", response_model=ChatResponse)
async def raw_chat(request: ChatRequest) -> ChatResponse:
    """
    Send a message to the raw HR Supervisor Agent (without context compaction).

    This endpoint bypasses the CompactingSupervisor wrapper, giving direct
    access to the underlying supervisor agent. Useful for debugging or when
    you want full control over context management.

    Use the returned `thread_id` in subsequent requests to maintain
    conversation context.
    """
    thread_id = request.thread_id or str(uuid.uuid4())[:8]

    try:
        config = {"configurable": {"thread_id": thread_id}}

        # Invoke the raw supervisor agent directly
        response = supervisor_agent.invoke(
            {"messages": [HumanMessage(content=request.message)]},
            config=config,
        )

        # Extract the final reply and count tokens across the full history
        final_message = response["messages"][-1]
        all_messages = response["messages"]
        token_count = count_tokens_for_messages(all_messages)

        return ChatResponse(
            response=final_message.content,
            thread_id=thread_id,
            token_count=token_count,
        )
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Raw agent execution failed: {str(e)}",
        )
@router.post("/raw/chat/stream")
async def raw_chat_stream(request: ChatRequest):
"""
Stream a response from the raw HR Supervisor Agent using Server-Sent Events (SSE).
⚠️ WARNING: This endpoint has known issues and needs to be fixed.
Use /raw/chat for reliable batch requests.
This endpoint bypasses the CompactingSupervisor wrapper, giving direct access
to the underlying supervisor agent's streaming capabilities.
Yields chunks as SSE events:
- event: token - A content token from the AI response
- event: done - Final message with metadata (token_count, thread_id)
- event: error - Error occurred
"""
thread_id = request.thread_id or str(uuid.uuid4())[:8]
def generate():
try:
config = {"configurable": {"thread_id": thread_id}}
full_response_content = ""
# Stream from the raw supervisor agent
for chunk in supervisor_agent.stream(
{"messages": [HumanMessage(content=request.message)]},
config=config,
stream_mode="messages"
):
# chunk is a tuple: (message, metadata)
message, metadata = chunk
# Only yield content from AI messages that have content
if hasattr(message, 'content') and message.content:
msg_type = message.__class__.__name__
if 'AIMessage' in msg_type:
yield f"event: token\ndata: {json.dumps({'content': message.content})}\n\n"
full_response_content += message.content
# Get final state for token counting
final_state = supervisor_agent.get_state(config)
token_count = 0
if final_state and hasattr(final_state, 'values'):
final_messages = final_state.values.get("messages", [])
token_count = count_tokens_for_messages(final_messages)
yield f"event: done\ndata: {json.dumps({'thread_id': thread_id, 'token_count': token_count})}\n\n"
except Exception as e:
yield f"event: error\ndata: {json.dumps({'error': str(e)})}\n\n"
return StreamingResponse(
generate(),
media_type="text/event-stream",
headers={
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"X-Accel-Buffering": "no",
}
)
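
# How this router might be wired into the app (illustrative sketch; the
# import path and URL prefix below are assumptions, not confirmed by this
# module -- only FastAPI's include_router API is):
#
#   from fastapi import FastAPI
#   from src.backend.api.routers.supervisor import router as supervisor_router
#
#   app = FastAPI()
#   app.include_router(supervisor_router, prefix="/supervisor", tags=["supervisor"])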