Spaces:
Runtime error
Runtime error
Commit Β·
3fcd87b
1
Parent(s): debee07
fyp
Browse files- app/ai/agent/nodes/greeting.py +24 -15
- app/ai/agent/state.py +3 -1
- app/ai/routes/chat_refactored.py +41 -30
app/ai/agent/nodes/greeting.py
CHANGED
|
@@ -2,6 +2,7 @@
|
|
| 2 |
"""
|
| 3 |
Node: Handle greeting flow.
|
| 4 |
User says hello/hi/greetings → respond warmly → end conversation.
|
|
|
|
| 5 |
"""
|
| 6 |
|
| 7 |
from structlog import get_logger
|
|
@@ -27,19 +28,25 @@ GREETING_PROMPT = """You are AIDA, a warm and friendly real estate AI assistant
|
|
| 27 |
|
| 28 |
User greeted you with: "{user_message}"
|
| 29 |
|
| 30 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 31 |
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
|
|
|
|
|
|
| 38 |
|
| 39 |
Examples:
|
| 40 |
-
-
|
| 41 |
-
-
|
| 42 |
-
-
|
| 43 |
|
| 44 |
Now respond to their greeting naturally (2-3 sentences only):"""
|
| 45 |
|
|
@@ -48,6 +55,8 @@ async def greeting_handler(state: AgentState) -> AgentState:
|
|
| 48 |
"""
|
| 49 |
Handle greeting flow.
|
| 50 |
|
|
|
|
|
|
|
| 51 |
Args:
|
| 52 |
state: Agent state with user's greeting message
|
| 53 |
|
|
@@ -55,7 +64,7 @@ async def greeting_handler(state: AgentState) -> AgentState:
|
|
| 55 |
Updated state with greeting response
|
| 56 |
|
| 57 |
Transitions to:
|
| 58 |
-
-
|
| 59 |
"""
|
| 60 |
|
| 61 |
logger.info(
|
|
@@ -107,13 +116,13 @@ async def greeting_handler(state: AgentState) -> AgentState:
|
|
| 107 |
logger.info("Greeting response stored in state", user_id=state.user_id)
|
| 108 |
|
| 109 |
# ============================================================
|
| 110 |
-
# STEP 4: Transition to COMPLETE
|
| 111 |
# ============================================================
|
| 112 |
|
| 113 |
-
success, error = state.transition_to(FlowState.
|
| 114 |
|
| 115 |
if not success:
|
| 116 |
-
logger.error("Transition to
|
| 117 |
state.set_error(error, should_retry=False)
|
| 118 |
return state
|
| 119 |
|
|
@@ -135,7 +144,7 @@ async def greeting_handler(state: AgentState) -> AgentState:
|
|
| 135 |
|
| 136 |
# Try to continue despite error
|
| 137 |
if state.set_error(error_msg, should_retry=True):
|
| 138 |
-
state.transition_to(FlowState.
|
| 139 |
else:
|
| 140 |
state.transition_to(FlowState.ERROR, reason="Greeting error")
|
| 141 |
|
|
|
|
| 2 |
"""
|
| 3 |
Node: Handle greeting flow.
|
| 4 |
User says hello/hi/greetings → respond warmly → end conversation.
|
| 5 |
+
FIXED: Transition to IDLE instead of COMPLETE
|
| 6 |
"""
|
| 7 |
|
| 8 |
from structlog import get_logger
|
|
|
|
| 28 |
|
| 29 |
User greeted you with: "{user_message}"
|
| 30 |
|
| 31 |
+
Generate a WARM, NATURAL response that:
|
| 32 |
+
1. Responds to their greeting in the SAME language they used
|
| 33 |
+
2. Matches their tone (formal/casual/friendly)
|
| 34 |
+
3. Introduces yourself briefly
|
| 35 |
+
4. Asks how you can help with real estate
|
| 36 |
|
| 37 |
+
Important:
|
| 38 |
+
- If they wrote in English → respond in English
|
| 39 |
+
- If they wrote in French → respond in French
|
| 40 |
+
- If they wrote in Spanish → respond in Spanish
|
| 41 |
+
- If they wrote in Yoruba → respond in Yoruba
|
| 42 |
+
- Detect language automatically and respond naturally
|
| 43 |
+
|
| 44 |
+
Keep it conversational (2-3 sentences max). Be genuine and warm.
|
| 45 |
|
| 46 |
Examples:
|
| 47 |
+
- "Hello!" → "Hello! 👋 I'm AIDA, your real estate assistant. How can I help you find or list a property today?"
|
| 48 |
+
- "Hi there" → "Hi! 👋 Great to see you! What can I do for you regarding real estate?"
|
| 49 |
+
- "Bonjour!" → "Bonjour! 👋 Je suis AIDA, votre assistant immobilier. Comment puis-je vous aider?"
|
| 50 |
|
| 51 |
Now respond to their greeting naturally (2-3 sentences only):"""
|
| 52 |
|
|
|
|
| 55 |
"""
|
| 56 |
Handle greeting flow.
|
| 57 |
|
| 58 |
+
FIXED: Transitions to IDLE instead of COMPLETE
|
| 59 |
+
|
| 60 |
Args:
|
| 61 |
state: Agent state with user's greeting message
|
| 62 |
|
|
|
|
| 64 |
Updated state with greeting response
|
| 65 |
|
| 66 |
Transitions to:
|
| 67 |
+
- IDLE (greeting handled, ready for next interaction)
|
| 68 |
"""
|
| 69 |
|
| 70 |
logger.info(
|
|
|
|
| 116 |
logger.info("Greeting response stored in state", user_id=state.user_id)
|
| 117 |
|
| 118 |
# ============================================================
|
| 119 |
+
# STEP 4: ✅ FIXED - Transition to IDLE (not COMPLETE!)
|
| 120 |
# ============================================================
|
| 121 |
|
| 122 |
+
success, error = state.transition_to(FlowState.IDLE, reason="Greeting handled, ready for next interaction")
|
| 123 |
|
| 124 |
if not success:
|
| 125 |
+
logger.error("Transition to IDLE failed", error=error)
|
| 126 |
state.set_error(error, should_retry=False)
|
| 127 |
return state
|
| 128 |
|
|
|
|
| 144 |
|
| 145 |
# Try to continue despite error
|
| 146 |
if state.set_error(error_msg, should_retry=True):
|
| 147 |
+
state.transition_to(FlowState.IDLE, reason="Greeting with error recovery")
|
| 148 |
else:
|
| 149 |
state.transition_to(FlowState.ERROR, reason="Greeting error")
|
| 150 |
|
app/ai/agent/state.py
CHANGED
|
@@ -107,6 +107,7 @@ class AgentState(BaseModel):
|
|
| 107 |
FlowState.CASUAL_CHAT,
|
| 108 |
FlowState.ERROR,
|
| 109 |
],
|
|
|
|
| 110 |
FlowState.GREETING: [
|
| 111 |
FlowState.IDLE,
|
| 112 |
FlowState.CLASSIFY_INTENT,
|
|
@@ -121,8 +122,9 @@ class AgentState(BaseModel):
|
|
| 121 |
FlowState.LISTING_COLLECT, # Go back if missing fields
|
| 122 |
FlowState.ERROR,
|
| 123 |
],
|
|
|
|
| 124 |
FlowState.LISTING_PUBLISH: [
|
| 125 |
-
FlowState.
|
| 126 |
FlowState.ERROR,
|
| 127 |
],
|
| 128 |
FlowState.SEARCH_QUERY: [
|
|
|
|
| 107 |
FlowState.CASUAL_CHAT,
|
| 108 |
FlowState.ERROR,
|
| 109 |
],
|
| 110 |
+
# ✅ FIXED: Greeting can transition to IDLE
|
| 111 |
FlowState.GREETING: [
|
| 112 |
FlowState.IDLE,
|
| 113 |
FlowState.CLASSIFY_INTENT,
|
|
|
|
| 122 |
FlowState.LISTING_COLLECT, # Go back if missing fields
|
| 123 |
FlowState.ERROR,
|
| 124 |
],
|
| 125 |
+
# ✅ FIXED: Publishing goes to IDLE (not COMPLETE)
|
| 126 |
FlowState.LISTING_PUBLISH: [
|
| 127 |
+
FlowState.IDLE,
|
| 128 |
FlowState.ERROR,
|
| 129 |
],
|
| 130 |
FlowState.SEARCH_QUERY: [
|
app/ai/routes/chat_refactored.py
CHANGED
|
@@ -1,7 +1,7 @@
|
|
| 1 |
# app/ai/routes/chat_refactored.py
|
| 2 |
"""
|
| 3 |
AIDA Chat Endpoint - LangGraph Powered (PRIMARY - v1)
|
| 4 |
-
Properly
|
| 5 |
"""
|
| 6 |
|
| 7 |
from fastapi import APIRouter, HTTPException
|
|
@@ -42,15 +42,15 @@ async def ask_ai(body: AskBody) -> AgentResponse:
|
|
| 42 |
Main chat endpoint using LangGraph state machine.
|
| 43 |
|
| 44 |
CRITICAL FIX:
|
| 45 |
-
-
|
| 46 |
-
-
|
| 47 |
-
- Extract response from
|
| 48 |
|
| 49 |
Flow:
|
| 50 |
1. Validate input
|
| 51 |
2. Build input dict
|
| 52 |
3. Invoke graph with dict input
|
| 53 |
-
4. Extract
|
| 54 |
5. Return to client
|
| 55 |
"""
|
| 56 |
|
|
@@ -86,7 +86,7 @@ async def ask_ai(body: AskBody) -> AgentResponse:
|
|
| 86 |
# ============================================================
|
| 87 |
# STEP 2: Build input dict for graph
|
| 88 |
# ============================================================
|
| 89 |
-
#
|
| 90 |
|
| 91 |
input_dict = {
|
| 92 |
"user_id": user_id,
|
|
@@ -128,20 +128,23 @@ async def ask_ai(body: AskBody) -> AgentResponse:
|
|
| 128 |
)
|
| 129 |
|
| 130 |
# ============================================================
|
| 131 |
-
# STEP 4: Invoke graph with dict input
|
| 132 |
# ============================================================
|
| 133 |
|
| 134 |
logger.info("π Invoking LangGraph...", user_id=user_id)
|
| 135 |
|
| 136 |
try:
|
| 137 |
-
#
|
| 138 |
-
|
|
|
|
|
|
|
|
|
|
| 139 |
|
| 140 |
logger.info(
|
| 141 |
"✅ LangGraph execution completed",
|
| 142 |
-
flow=
|
| 143 |
-
steps=
|
| 144 |
-
has_error=
|
| 145 |
)
|
| 146 |
|
| 147 |
except Exception as e:
|
|
@@ -154,17 +157,18 @@ async def ask_ai(body: AskBody) -> AgentResponse:
|
|
| 154 |
)
|
| 155 |
|
| 156 |
# ============================================================
|
| 157 |
-
# STEP 5: Extract response from final state
|
| 158 |
# ============================================================
|
| 159 |
|
| 160 |
-
#
|
| 161 |
-
|
|
|
|
| 162 |
|
| 163 |
if final_response:
|
| 164 |
logger.info(
|
| 165 |
"✅ Response extracted from state",
|
| 166 |
-
action=final_response.action,
|
| 167 |
-
success=final_response.success,
|
| 168 |
)
|
| 169 |
return final_response
|
| 170 |
|
|
@@ -174,27 +178,34 @@ async def ask_ai(body: AskBody) -> AgentResponse:
|
|
| 174 |
|
| 175 |
logger.warning("⚠️ No final_response in temp_data, building manually")
|
| 176 |
|
| 177 |
-
response_text =
|
| 178 |
if not response_text:
|
| 179 |
response_text = "I'm here to help! What would you like to do?"
|
| 180 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 181 |
response = AgentResponse(
|
| 182 |
-
success=
|
| 183 |
text=response_text,
|
| 184 |
-
action=
|
| 185 |
state={
|
| 186 |
-
"flow":
|
| 187 |
-
"steps":
|
| 188 |
-
"errors":
|
| 189 |
},
|
| 190 |
-
draft=
|
| 191 |
-
draft_ui=
|
| 192 |
-
error=
|
| 193 |
metadata={
|
| 194 |
-
"intent":
|
| 195 |
-
"intent_confidence":
|
| 196 |
-
"language":
|
| 197 |
-
"messages_in_session": len(
|
| 198 |
"user_id": user_id,
|
| 199 |
"session_id": session_id,
|
| 200 |
}
|
|
|
|
| 1 |
# app/ai/routes/chat_refactored.py
|
| 2 |
"""
|
| 3 |
AIDA Chat Endpoint - LangGraph Powered (PRIMARY - v1)
|
| 4 |
+
FIXED: Properly handles dict output from graph.ainvoke()
|
| 5 |
"""
|
| 6 |
|
| 7 |
from fastapi import APIRouter, HTTPException
|
|
|
|
| 42 |
Main chat endpoint using LangGraph state machine.
|
| 43 |
|
| 44 |
CRITICAL FIX:
|
| 45 |
+
- graph.ainvoke() returns a DICT, not AgentState object
|
| 46 |
+
- Access dict keys with ['key'], not .attribute
|
| 47 |
+
- Extract response from dict['temp_data']['final_response']
|
| 48 |
|
| 49 |
Flow:
|
| 50 |
1. Validate input
|
| 51 |
2. Build input dict
|
| 52 |
3. Invoke graph with dict input
|
| 53 |
+
4. Extract final_response from returned dict
|
| 54 |
5. Return to client
|
| 55 |
"""
|
| 56 |
|
|
|
|
| 86 |
# ============================================================
|
| 87 |
# STEP 2: Build input dict for graph
|
| 88 |
# ============================================================
|
| 89 |
+
# ✅ CRITICAL: Pass dict, not AgentState
|
| 90 |
|
| 91 |
input_dict = {
|
| 92 |
"user_id": user_id,
|
|
|
|
| 128 |
)
|
| 129 |
|
| 130 |
# ============================================================
|
| 131 |
+
# STEP 4: Invoke graph with dict input
|
| 132 |
# ============================================================
|
| 133 |
|
| 134 |
logger.info("π Invoking LangGraph...", user_id=user_id)
|
| 135 |
|
| 136 |
try:
|
| 137 |
+
# ✅ Pass dict to ainvoke
|
| 138 |
+
final_state_dict = await graph.ainvoke(input_dict)
|
| 139 |
+
|
| 140 |
+
# ✅ CRITICAL: final_state_dict is a DICT, not AgentState!
|
| 141 |
+
# Access with dict keys: ['key'], not .attribute
|
| 142 |
|
| 143 |
logger.info(
|
| 144 |
"✅ LangGraph execution completed",
|
| 145 |
+
flow=final_state_dict.get("current_flow", {}).get("value") if isinstance(final_state_dict.get("current_flow"), dict) else str(final_state_dict.get("current_flow")),
|
| 146 |
+
steps=final_state_dict.get("steps_taken"),
|
| 147 |
+
has_error=final_state_dict.get("last_error") is not None,
|
| 148 |
)
|
| 149 |
|
| 150 |
except Exception as e:
|
|
|
|
| 157 |
)
|
| 158 |
|
| 159 |
# ============================================================
|
| 160 |
+
# STEP 5: Extract response from final state dict
|
| 161 |
# ============================================================
|
| 162 |
|
| 163 |
+
# ✅ Access dict['key'] not dict.key
|
| 164 |
+
temp_data = final_state_dict.get("temp_data", {})
|
| 165 |
+
final_response = temp_data.get("final_response")
|
| 166 |
|
| 167 |
if final_response:
|
| 168 |
logger.info(
|
| 169 |
"✅ Response extracted from state",
|
| 170 |
+
action=final_response.action if hasattr(final_response, 'action') else "unknown",
|
| 171 |
+
success=final_response.success if hasattr(final_response, 'success') else False,
|
| 172 |
)
|
| 173 |
return final_response
|
| 174 |
|
|
|
|
| 178 |
|
| 179 |
logger.warning("⚠️ No final_response in temp_data, building manually")
|
| 180 |
|
| 181 |
+
response_text = temp_data.get("response_text", "")
|
| 182 |
if not response_text:
|
| 183 |
response_text = "I'm here to help! What would you like to do?"
|
| 184 |
|
| 185 |
+
# Get flow state - handle both FlowState enum and string
|
| 186 |
+
current_flow = final_state_dict.get("current_flow")
|
| 187 |
+
if hasattr(current_flow, 'value'):
|
| 188 |
+
flow_str = current_flow.value
|
| 189 |
+
else:
|
| 190 |
+
flow_str = str(current_flow)
|
| 191 |
+
|
| 192 |
response = AgentResponse(
|
| 193 |
+
success=final_state_dict.get("last_error") is None,
|
| 194 |
text=response_text,
|
| 195 |
+
action=temp_data.get("action", flow_str),
|
| 196 |
state={
|
| 197 |
+
"flow": flow_str,
|
| 198 |
+
"steps": final_state_dict.get("steps_taken", 0),
|
| 199 |
+
"errors": final_state_dict.get("error_count", 0),
|
| 200 |
},
|
| 201 |
+
draft=temp_data.get("draft"),
|
| 202 |
+
draft_ui=temp_data.get("draft_ui"),
|
| 203 |
+
error=final_state_dict.get("last_error"),
|
| 204 |
metadata={
|
| 205 |
+
"intent": final_state_dict.get("intent_type"),
|
| 206 |
+
"intent_confidence": final_state_dict.get("intent_confidence", 0),
|
| 207 |
+
"language": final_state_dict.get("language_detected", "en"),
|
| 208 |
+
"messages_in_session": len(final_state_dict.get("conversation_history", [])),
|
| 209 |
"user_id": user_id,
|
| 210 |
"session_id": session_id,
|
| 211 |
}
|