destinyebuka committed on
Commit
4db844a
·
1 Parent(s): 88604b7
Files changed (2) hide show
  1. app/ai/config.py +1 -1
  2. app/ai/tools/casual_chat_tool.py +125 -123
app/ai/config.py CHANGED
@@ -99,7 +99,7 @@ except Exception as e:
99
  try:
100
  from app.ai.tools.intent_detector_tool import process_user_message
101
  from app.ai.tools.listing_tool import process_listing
102
- from app.ai.tools.casual_chat_tool import process_casual_chat
103
  from app.ai.tools.greeting_tool import process_greeting, is_greeting # ✅ Now AI-powered & async!
104
 
105
  logger.info("✅ All Aida tools imported successfully")
 
99
  try:
100
  from app.ai.tools.intent_detector_tool import process_user_message
101
  from app.ai.tools.listing_tool import process_listing
102
+ from app.ai.tools.casual_chat_tool import process_casual_chat, casual_chat_handler
103
  from app.ai.tools.greeting_tool import process_greeting, is_greeting # ✅ Now AI-powered & async!
104
 
105
  logger.info("✅ All Aida tools imported successfully")
app/ai/tools/casual_chat_tool.py CHANGED
@@ -1,170 +1,172 @@
1
- # app/ai/agent/nodes/casual_chat.py
2
- """
3
- Node: Handle casual conversation with proper state transitions
4
- FIXED: Transitions to IDLE instead of COMPLETE to allow conversation continuation
5
- """
6
 
 
7
  from structlog import get_logger
8
  from langchain_openai import ChatOpenAI
9
- from langchain_core.messages import SystemMessage, HumanMessage
 
10
 
11
- from app.ai.agent.state import AgentState, FlowState
12
- from app.ai.agent.validators import ResponseValidator
13
- from app.ai.prompts.system_prompt import get_system_prompt
14
  from app.config import settings
 
15
 
16
  logger = get_logger(__name__)
17
 
18
- # Initialize LLM for casual conversation
19
  llm = ChatOpenAI(
20
  api_key=settings.DEEPSEEK_API_KEY,
21
  base_url=settings.DEEPSEEK_BASE_URL,
22
  model="deepseek-chat",
23
- temperature=0.8, # Higher temp for more natural conversation
 
24
  )
25
 
26
- def build_conversation_context(state: AgentState) -> str:
27
- """
28
- Build conversation context from history.
29
-
30
- Args:
31
- state: Agent state with conversation history
32
-
33
- Returns:
34
- Formatted conversation history string
35
- """
36
-
37
- messages = state.conversation_history[-6:] # Last 6 messages for context
38
-
39
- if not messages:
40
- return "(New conversation)"
41
 
42
  formatted = []
43
- for msg in messages:
44
- role = "User" if msg["role"] == "user" else "Aida"
45
- content = msg["content"]
46
  formatted.append(f"{role}: {content}")
47
 
48
  return "\n".join(formatted)
49
 
50
- async def casual_chat_handler(state: AgentState) -> AgentState:
 
 
 
 
 
 
51
  """
52
- Handle casual conversation with proper state transitions.
53
-
54
- FIXED: Transitions to IDLE instead of COMPLETE to allow conversation continuation
55
 
56
  Args:
57
- state: Agent state
 
 
 
58
 
59
  Returns:
60
- Updated state
 
 
 
 
 
61
  """
62
 
63
  logger.info(
64
- "Handling casual chat",
65
- user_id=state.user_id,
66
- message=state.last_user_message[:50]
67
  )
68
 
69
  try:
70
- # ============================================================
71
- # STEP 1: Build conversation context
72
- # ============================================================
73
-
74
- conv_context = build_conversation_context(state)
75
-
76
- logger.info("Conversation context built", context_len=len(conv_context))
77
-
78
- # ============================================================
79
- # STEP 2: Get system prompt
80
- # ============================================================
81
-
82
- system_prompt = get_system_prompt(user_role=state.user_role)
83
-
84
- logger.info("System prompt loaded", user_role=state.user_role)
85
-
86
- # ============================================================
87
- # STEP 3: Build chat prompt with context
88
- # ============================================================
89
-
90
- chat_prompt = f"""{system_prompt}
91
-
92
- CONVERSATION HISTORY:
93
- {conv_context}
94
-
95
- CURRENT USER MESSAGE: {state.last_user_message}
96
-
97
- Respond naturally and helpfully. Keep your response conversational and friendly (2-3 sentences max)."""
98
-
99
- # ============================================================
100
- # STEP 4: Call LLM for response
101
- # ============================================================
102
-
103
- response = await llm.ainvoke([
104
- SystemMessage(content="You are AIDA, a warm and helpful real estate AI assistant. Respond naturally to user questions."),
105
- HumanMessage(content=chat_prompt)
106
- ])
107
 
108
- response_text = response.content if hasattr(response, 'content') else str(response)
 
109
 
110
- logger.info("LLM response generated", response_len=len(response_text))
 
111
 
112
- # ============================================================
113
- # STEP 5: Validate response
114
- # ============================================================
 
 
 
115
 
116
- is_valid, cleaned_text, error = ResponseValidator.validate_response_text(response_text)
 
117
 
118
- if not is_valid:
119
- logger.warning("Response validation failed", error=error)
120
- # Use fallback
121
- cleaned_text = "I'm here to help with real estate questions. What would you like to know?"
122
  else:
123
- # Sanitize
124
- cleaned_text = ResponseValidator.sanitize_response(cleaned_text)
125
-
126
- logger.info("Response validated", text_len=len(cleaned_text))
127
-
128
- # ============================================================
129
- # STEP 6: Store in state
130
- # ============================================================
131
-
132
- state.temp_data["response_text"] = cleaned_text
133
- state.temp_data["action"] = "casual_chat"
134
-
135
- logger.info("Response stored in state", user_id=state.user_id)
136
-
137
- # ============================================================
138
- # STEP 7: ✅ FIXED - Transition to IDLE (not COMPLETE!)
139
- # ============================================================
140
-
141
- success, error = state.transition_to(FlowState.IDLE, reason="Casual chat completed, ready for next interaction")
142
-
143
- if not success:
144
- logger.error("Transition to IDLE failed", error=error)
145
- state.set_error(error, should_retry=False)
146
- return state
147
 
148
  logger.info(
149
- "Casual chat completed",
150
- user_id=state.user_id,
151
- steps=state.steps_taken
152
  )
153
 
154
- return state
 
 
 
 
 
 
 
 
155
 
156
  except Exception as e:
157
  logger.error("Casual chat error", exc_info=e)
158
- error_msg = f"Chat error: {str(e)}"
159
 
160
- # Set fallback response
161
- state.temp_data["response_text"] = "Sorry, I had a moment there! What were you saying?"
162
- state.temp_data["action"] = "casual_chat"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
163
 
164
- # Try to recover
165
- if state.set_error(error_msg, should_retry=True):
166
- state.transition_to(FlowState.IDLE, reason="Chat with error recovery")
167
- else:
168
- state.transition_to(FlowState.ERROR, reason="Casual chat error")
169
 
170
- return state
 
 
 
 
 
1
+ # app/ai/tools/casual_chat_tool.py
2
+ # FIXED: Added process_casual_chat function for backward compatibility
 
 
 
3
 
4
+ from typing import Dict, Optional
5
  from structlog import get_logger
6
  from langchain_openai import ChatOpenAI
7
+ from langchain_core.prompts import ChatPromptTemplate
8
+ from langchain_core.runnables import RunnablePassthrough
9
 
 
 
 
10
  from app.config import settings
11
+ from app.ai.prompts.system_prompt import get_system_prompt
12
 
13
  logger = get_logger(__name__)
14
 
15
+ # INITIALIZE LLM
16
  llm = ChatOpenAI(
17
  api_key=settings.DEEPSEEK_API_KEY,
18
  base_url=settings.DEEPSEEK_BASE_URL,
19
  model="deepseek-chat",
20
+ temperature=0.8,
21
+ max_tokens=500,
22
  )
23
 
24
+ # PROMPT TEMPLATE
25
+ CASUAL_CHAT_PROMPT = ChatPromptTemplate.from_messages([
26
+ ("system", "{system_prompt}"),
27
+ ("human", "{input}"),
28
+ ])
29
+
30
+ # MEMORY MANAGEMENT (simplified without deprecated classes)
31
+ def create_chat_history(conversation_history: list = None) -> str:
32
+ """Convert conversation history to formatted string"""
33
+ if not conversation_history:
34
+ return ""
 
 
 
 
35
 
36
  formatted = []
37
+ for msg in conversation_history[-10:]: # Last 10 messages for context
38
+ role = "Aida" if msg.get("role") == "assistant" else "You"
39
+ content = msg.get("content", "")
40
  formatted.append(f"{role}: {content}")
41
 
42
  return "\n".join(formatted)
43
 
44
+ # BACKWARD COMPATIBILITY: Keep the old function name for config.py imports
45
+ async def process_casual_chat(
46
+ user_message: str,
47
+ user_id: str,
48
+ user_role: str = "renter",
49
+ conversation_history: list = None,
50
+ ) -> Dict:
51
  """
52
+ Process casual chat message using LangChain LCEL (modern approach).
53
+ This function maintains backward compatibility with the old import.
 
54
 
55
  Args:
56
+ user_message: What user said
57
+ user_id: User ID
58
+ user_role: User's role (landlord or renter)
59
+ conversation_history: Previous messages in conversation
60
 
61
  Returns:
62
+ {
63
+ "success": bool,
64
+ "action": "casual_chat",
65
+ "reply": str (Aida's response),
66
+ "state": dict
67
+ }
68
  """
69
 
70
  logger.info(
71
+ "Processing casual chat with LangChain LCEL",
72
+ user_id=user_id,
73
+ message_len=len(user_message)
74
  )
75
 
76
  try:
77
+ # Get system prompt
78
+ system_prompt = get_system_prompt(user_role=user_role)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
79
 
80
+ # Format conversation history
81
+ chat_history = create_chat_history(conversation_history)
82
 
83
+ # Create LCEL chain (modern approach)
84
+ chain = CASUAL_CHAT_PROMPT | llm
85
 
86
+ # Prepare context
87
+ context = {
88
+ "system_prompt": system_prompt,
89
+ "input": user_message,
90
+ "chat_history": chat_history
91
+ }
92
 
93
+ # Run chain
94
+ response = await chain.ainvoke(context)
95
 
96
+ # Extract text from response
97
+ if hasattr(response, 'content'):
98
+ aida_reply = response.content.strip()
 
99
  else:
100
+ aida_reply = str(response).strip()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
101
 
102
  logger.info(
103
+ "Casual chat response generated",
104
+ reply_len=len(aida_reply)
 
105
  )
106
 
107
+ return {
108
+ "success": True,
109
+ "action": "casual_chat",
110
+ "reply": aida_reply,
111
+ "state": {
112
+ "status": "chatting",
113
+ "last_message_type": "casual_chat",
114
+ }
115
+ }
116
 
117
  except Exception as e:
118
  logger.error("Casual chat error", exc_info=e)
 
119
 
120
+ fallback_reply = "Sorry, I had a moment there! What were you saying?"
121
+
122
+ return {
123
+ "success": False,
124
+ "action": "casual_chat",
125
+ "reply": fallback_reply,
126
+ "state": {
127
+ "status": "chatting",
128
+ "last_message_type": "casual_chat",
129
+ "error": str(e)
130
+ }
131
+ }
132
+
133
+ # For LangGraph compatibility - the new function name
134
+ async def casual_chat_handler(user_message: str, user_id: str, user_role: str = "renter", conversation_history: list = None) -> Dict:
135
+ """
136
+ LangGraph-compatible version of casual chat processing
137
+ Just calls the process_casual_chat function
138
+ """
139
+ return await process_casual_chat(
140
+ user_message=user_message,
141
+ user_id=user_id,
142
+ user_role=user_role,
143
+ conversation_history=conversation_history
144
+ )
145
+
146
+ # TEST FUNCTION
147
+ async def test():
148
+ """Test the LangChain chat"""
149
+
150
+ test_messages = [
151
+ "Hi, how are you?",
152
+ "Who created you?",
153
+ "What's the weather like?",
154
+ "Can you help me list my apartment?",
155
+ ]
156
+
157
+ print("\nTesting LangChain Casual Chat\n" + "="*70 + "\n")
158
+
159
+ for message in test_messages:
160
+ print(f"User: {message}")
161
 
162
+ result = await process_casual_chat(
163
+ user_message=message,
164
+ user_id="test_user",
165
+ user_role="landlord"
166
+ )
167
 
168
+ print(f"Aida: {result['reply']}\n")
169
+
170
+ if __name__ == "__main__":
171
+ import asyncio
172
+ asyncio.run(test())