File size: 3,601 Bytes
cd6f412
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28baf2e
cd6f412
 
 
 
 
 
 
 
 
 
28baf2e
 
cd6f412
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
import logging
from fastapi import APIRouter, HTTPException, Body
from typing import Dict, Any

# Import our services, agents, and models
from insucompass.services.zip_client import get_geo_data_from_zip
from insucompass.core.models import GeoDataResponse, ChatRequest, ChatResponse
from insucompass.core.agents.profile_agent import profile_builder
from insucompass.core.agent_orchestrator import app as orchestrator # The compiled LangGraph app

from insucompass.services.database import get_db_connection, create_or_update_user_profile, get_user_profile

# Configure logging for this module (handlers/level are set by the app entry point).
logger = logging.getLogger(__name__)

# Create the API router that the endpoints below register against.
# NOTE: the original file created a second logger and a second APIRouter
# immediately after these; the duplicates were redundant (the second
# APIRouter() discarded the first instance) and have been removed.
router = APIRouter()

@router.get("/geodata/{zip_code}", response_model=GeoDataResponse)
def get_geolocation_data(zip_code: str):
    """Resolve a 5-digit ZIP code to its county, city, and state.

    Raises:
        HTTPException: 400 for a malformed ZIP code, 404 when no
            location data can be found for it.
    """
    # Validate the format before calling the geo lookup service.
    if len(zip_code) != 5 or not zip_code.isdigit():
        raise HTTPException(status_code=400, detail="Invalid ZIP code format.")

    geo_data = get_geo_data_from_zip(zip_code)
    if not geo_data:
        raise HTTPException(status_code=404, detail="Could not find location data.")

    # The upstream service returns names like "Cook County"; strip the suffix.
    county_name = geo_data.county.replace(" County", "")
    return GeoDataResponse(
        zip_code=zip_code,
        county=county_name,
        city=geo_data.city,
        state=geo_data.state,
        state_abbreviation=geo_data.state_abbr,
    )

@router.post("/chat", response_model=ChatResponse)
async def chat(request: ChatRequest):
    """Handle one conversational turn via the unified LangGraph orchestrator.

    The orchestrator owns the conversation state (profile building and Q&A);
    this endpoint only supplies the current turn's inputs and maps the final
    graph state back onto the response model.
    """
    logger.info(f"Received unified chat request for thread_id: {request.thread_id}")

    # LangGraph's checkpointer keys persisted conversation state by thread_id.
    config = {"configurable": {"thread_id": request.thread_id}}

    # Only the current turn's inputs are provided; the graph restores any
    # prior state for this thread from its checkpointer automatically.
    turn_inputs = {
        "user_profile": request.user_profile,
        "user_message": request.message,
        "is_profile_complete": request.is_profile_complete,
        "conversation_history": request.conversation_history,
    }

    try:
        # NOTE(review): invoke() is synchronous inside an async endpoint —
        # presumably acceptable for this graph, but confirm long-running
        # orchestrations do not block the event loop.
        result = orchestrator.invoke(turn_inputs, config=config)

        reply = result.get("generation")
        if not reply:
            # Graceful fallback when the graph produced no generation.
            reply = "I'm sorry, I encountered an issue. Could you please rephrase?"

        logger.info("Unified graph execution completed successfully.")

        return ChatResponse(
            agent_response=reply,
            updated_profile=result.get("user_profile"),
            updated_history=result.get("conversation_history"),
            is_profile_complete=result.get("is_profile_complete"),
            plan_recommendations=result.get("plan_recommendations"),
        )

    except Exception as e:
        logger.error(f"Error during unified graph orchestration: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="An internal error occurred in the AI orchestrator.")