destinyebuka committed on
Commit
fd08741
·
1 Parent(s): 9de9113
app/ai/agent/reply_intelligence.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Intelligent reply detection for AIDA.
3
+ Determines which message AIDA should quote when responding.
4
+ """
5
+
6
+ from typing import Optional, Dict, Any, List
7
+
8
+
9
def determine_reply_to_message(
    conversation_history: List[Dict[str, Any]],
    reply_context: Optional[Dict[str, Any]] = None
) -> Optional[str]:
    """
    Pick the message ID that AIDA should quote when responding.

    Strategy:
    1. If the user just replied to a specific message (``reply_context``
       exists), quote the LATEST USER MESSAGE, not the message they swiped.
    2. Otherwise, quote the most recent user message.

    Args:
        conversation_history: Messages ordered newest-first, each shaped
            like ``{"id": str, "sender_id": str, "content": str,
            "is_ai": bool, ...}``.
        reply_context: Optional context of the message the user replied to.

    Returns:
        The ID of the message to quote, or ``None`` if no appropriate
        message is found (empty history, or only AI messages).
    """
    # History is sorted newest-first, so the first non-AI entry is the
    # most recent user message.
    latest = next(
        (msg for msg in conversation_history if not msg.get("is_ai", False)),
        None,
    )
    return latest.get("id") if latest is not None else None
44
+
45
+
46
+ def build_reply_context_for_response(
47
+ conversation_history: List[Dict[str, Any]],
48
+ reply_to_id: str
49
+ ) -> Optional[Dict[str, Any]]:
50
+ """
51
+ Build reply context for AIDA's response.
52
+
53
+ Returns:
54
+ {
55
+ "id": message_id,
56
+ "sender_name": sender name,
57
+ "content": message content
58
+ }
59
+ """
60
+
61
+ if not reply_to_id:
62
+ return None
63
+
64
+ # Find the message to quote
65
+ for msg in conversation_history:
66
+ if msg.get("id") == reply_to_id:
67
+ return {
68
+ "id": msg.get("id"),
69
+ "sender_name": msg.get("sender_name", "User"),
70
+ "content": msg.get("content", ""),
71
+ }
72
+
73
+ return None
app/ai/agent/schemas.py CHANGED
@@ -172,6 +172,10 @@ class AgentResponse(BaseModel):
172
  my_listings: Optional[List[Dict[str, Any]]] = None # For user's own listings
173
  tool_result: Optional[ToolResult] = None
174
  error: Optional[str] = None
 
 
 
 
175
  metadata: Dict[str, Any] = Field(default_factory=dict)
176
 
177
 
 
172
  my_listings: Optional[List[Dict[str, Any]]] = None # For user's own listings
173
  tool_result: Optional[ToolResult] = None
174
  error: Optional[str] = None
175
+ # Intelligent reply context
176
+ reply_to_id: Optional[str] = None
177
+ replied_to_content: Optional[str] = None
178
+ replied_to_sender: Optional[str] = None
179
  metadata: Dict[str, Any] = Field(default_factory=dict)
180
 
181
 
app/ai/routes/chat.py CHANGED
@@ -8,7 +8,7 @@ See app/ai/services/agent_executor.py for execution logic.
8
 
9
  from fastapi import APIRouter, Request, Response
10
  from pydantic import BaseModel
11
- from typing import Optional, Dict, Any
12
  from structlog import get_logger
13
  from uuid import uuid4
14
  from datetime import datetime
@@ -38,6 +38,7 @@ class AskBody(BaseModel):
38
  is_voice_message: Optional[bool] = False # True if message came from voice input
39
  source: Optional[str] = "default" # "dm", "search_bar", etc.
40
  reply_context: Optional[Dict[str, Any]] = None # Context of message user is replying to
 
41
 
42
 
43
 
@@ -88,6 +89,7 @@ async def ask_ai(request: Request, response: Response, body: AskBody) -> AgentRe
88
  is_voice_message=body.is_voice_message or False,
89
  source=body.source or "default",
90
  reply_context=body.reply_context,
 
91
  )
92
 
93
  logger.info(
 
8
 
9
  from fastapi import APIRouter, Request, Response
10
  from pydantic import BaseModel
11
+ from typing import Optional, Dict, Any, List
12
  from structlog import get_logger
13
  from uuid import uuid4
14
  from datetime import datetime
 
38
  is_voice_message: Optional[bool] = False # True if message came from voice input
39
  source: Optional[str] = "default" # "dm", "search_bar", etc.
40
  reply_context: Optional[Dict[str, Any]] = None # Context of message user is replying to
41
+ conversation_history: Optional[List[Dict[str, Any]]] = None # Full conversation for intelligent replies
42
 
43
 
44
 
 
89
  is_voice_message=body.is_voice_message or False,
90
  source=body.source or "default",
91
  reply_context=body.reply_context,
92
+ conversation_history=body.conversation_history,
93
  )
94
 
95
  logger.info(
app/ai/services/agent_executor.py CHANGED
@@ -21,7 +21,7 @@ import json
21
  import hashlib
22
  import logging
23
  from uuid import uuid4
24
- from typing import Optional, Dict, Any
25
  from dataclasses import dataclass
26
 
27
  from app.ai.agent.graph import get_aida_graph
@@ -47,6 +47,7 @@ class AgentInput:
47
  is_voice_message: bool = False
48
  source: str = "default"
49
  reply_context: Optional[Dict[str, Any]] = None
 
50
 
51
  def to_dict(self) -> Dict[str, Any]:
52
  """Convert to dict for graph invocation"""
@@ -63,12 +64,14 @@ class AgentInput:
63
  "source": self.source,
64
  "temp_data": {"reply_context": self.reply_context} if self.reply_context else {},
65
  }
66
-
67
  if self.start_new_session:
68
  result["conversation_history"] = []
69
  result["provided_fields"] = {}
70
  result["missing_required_fields"] = []
71
-
 
 
72
  return result
73
 
74
  @property
@@ -222,17 +225,38 @@ class AgentExecutor:
222
  def _extract_response(self, state: Dict[str, Any], input: AgentInput) -> AgentResponse:
223
  """Extract clean response from graph state"""
224
  temp_data = state.get("temp_data", {})
225
-
226
  # Check for pre-built response
227
  if final_response := temp_data.get("final_response"):
228
  return final_response
229
-
230
  # Build response from state
231
  response_text = temp_data.get("response_text") or "I'm here to help! What would you like to do?"
232
-
233
  current_flow = state.get("current_flow")
234
  flow_str = current_flow.value if hasattr(current_flow, 'value') else str(current_flow)
235
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
236
  return AgentResponse(
237
  success=state.get("last_error") is None,
238
  text=response_text,
@@ -244,7 +268,12 @@ class AgentExecutor:
244
  },
245
  draft=temp_data.get("draft"),
246
  draft_ui=temp_data.get("draft_ui"),
 
 
247
  error=state.get("last_error"),
 
 
 
248
  metadata={
249
  "intent": state.get("intent_type"),
250
  "intent_confidence": state.get("intent_confidence", 0),
 
21
  import hashlib
22
  import logging
23
  from uuid import uuid4
24
+ from typing import Optional, Dict, Any, List
25
  from dataclasses import dataclass
26
 
27
  from app.ai.agent.graph import get_aida_graph
 
47
  is_voice_message: bool = False
48
  source: str = "default"
49
  reply_context: Optional[Dict[str, Any]] = None
50
+ conversation_history: Optional[List[Dict[str, Any]]] = None
51
 
52
  def to_dict(self) -> Dict[str, Any]:
53
  """Convert to dict for graph invocation"""
 
64
  "source": self.source,
65
  "temp_data": {"reply_context": self.reply_context} if self.reply_context else {},
66
  }
67
+
68
  if self.start_new_session:
69
  result["conversation_history"] = []
70
  result["provided_fields"] = {}
71
  result["missing_required_fields"] = []
72
+ elif self.conversation_history:
73
+ result["conversation_history"] = self.conversation_history
74
+
75
  return result
76
 
77
  @property
 
225
  def _extract_response(self, state: Dict[str, Any], input: AgentInput) -> AgentResponse:
226
  """Extract clean response from graph state"""
227
  temp_data = state.get("temp_data", {})
228
+
229
  # Check for pre-built response
230
  if final_response := temp_data.get("final_response"):
231
  return final_response
232
+
233
  # Build response from state
234
  response_text = temp_data.get("response_text") or "I'm here to help! What would you like to do?"
235
+
236
  current_flow = state.get("current_flow")
237
  flow_str = current_flow.value if hasattr(current_flow, 'value') else str(current_flow)
238
+
239
+ # Determine intelligent reply
240
+ reply_to_id = None
241
+ reply_to_context = None
242
+
243
+ if input.conversation_history:
244
+ from app.ai.agent.reply_intelligence import (
245
+ determine_reply_to_message,
246
+ build_reply_context_for_response
247
+ )
248
+
249
+ reply_to_id = determine_reply_to_message(
250
+ conversation_history=input.conversation_history,
251
+ reply_context=input.reply_context
252
+ )
253
+
254
+ if reply_to_id:
255
+ reply_to_context = build_reply_context_for_response(
256
+ conversation_history=input.conversation_history,
257
+ reply_to_id=reply_to_id
258
+ )
259
+
260
  return AgentResponse(
261
  success=state.get("last_error") is None,
262
  text=response_text,
 
268
  },
269
  draft=temp_data.get("draft"),
270
  draft_ui=temp_data.get("draft_ui"),
271
+ search_results=temp_data.get("search_results"),
272
+ my_listings=temp_data.get("my_listings"),
273
  error=state.get("last_error"),
274
+ reply_to_id=reply_to_id,
275
+ replied_to_content=reply_to_context["content"] if reply_to_context else None,
276
+ replied_to_sender=reply_to_context["sender_name"] if reply_to_context else None,
277
  metadata={
278
  "intent": state.get("intent_type"),
279
  "intent_confidence": state.get("intent_confidence", 0),