akryldigital committed on
Commit
3c05bef
·
verified ·
1 Parent(s): 05c2a69

reduce Note: ... filter warning

src/agents/multi_agent_chatbot_monolith.py ADDED
@@ -0,0 +1,1383 @@
+ """
+ Multi-Agent RAG Chatbot using LangGraph
+ This system implements a 3-agent architecture:
+ 1. Main Agent: Handles conversation flow, follow-ups, and determines when to call RAG
+ 2. RAG Agent: Rewrites queries and applies filters for document retrieval
+ 3. Response Agent: Generates final answers from retrieved documents
+ Each agent has specialized prompts and responsibilities.
+ """
+ import re
+ import json
+ import time
+ import logging
+ import traceback
+ from pathlib import Path
+ from datetime import datetime
+ from dataclasses import dataclass
+ from typing import Dict, List, Any, Optional, TypedDict, Union
+
+ from langchain_core.tools import tool
+ from langgraph.graph import StateGraph, END
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
+
+
+ from src.pipeline import PipelineManager
+ from src.llm.adapters import get_llm_client
+ from src.config.paths import PROJECT_DIR, CONVERSATIONS_DIR
+ from src.config.loader import load_config, get_embedding_model_for_collection
+
+
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+ logger = logging.getLogger(__name__)
+
+
+ @dataclass
+ class QueryContext:
+     """Context extracted from conversation"""
+     has_district: bool = False
+     has_source: bool = False
+     has_year: bool = False
+     # Extracted values may be a single name or a list of names (see _analyze_query_context)
+     extracted_district: Optional[Union[str, List[str]]] = None
+     extracted_source: Optional[Union[str, List[str]]] = None
+     extracted_year: Optional[Union[str, List[str]]] = None
+     ui_filters: Optional[Dict[str, List[str]]] = None
+     confidence_score: float = 0.0
+     needs_follow_up: bool = False
+     follow_up_question: Optional[str] = None
+
+
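+ # Illustrative example: a query like "PDM funds in Gulu for 2023" could yield
+ # QueryContext(has_district=True, has_year=True, extracted_district="Gulu",
+ #              extracted_year="2023", confidence_score=0.9, needs_follow_up=False)
+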
+ class MultiAgentState(TypedDict):
+     """State for the multi-agent conversation flow"""
+     conversation_id: str
+     messages: List[Any]
+     current_query: str
+     query_context: Optional[QueryContext]
+     rag_query: Optional[str]
+     rag_filters: Optional[Dict[str, Any]]
+     retrieved_documents: Optional[List[Any]]
+     final_response: Optional[str]
+     agent_logs: List[str]
+     conversation_context: Dict[str, Any]
+     session_start_time: float
+     last_ai_message_time: float
+
+
+ class MultiAgentRAGChatbot:
+     """Multi-agent RAG chatbot with specialized agents"""
+
+     def __init__(self, config_path: str = "src/config/settings.yaml"):
+         """Initialize the multi-agent chatbot"""
+         self.config = load_config(config_path)
+
+         # Get LLM provider from config
+         reader_config = self.config.get("reader", {})
+         default_type = reader_config.get("default_type", "INF_PROVIDERS")
+         provider_name = default_type.lower()
+
+         self.llm_adapter = get_llm_client(provider_name, self.config)
+
+         # Create a simple wrapper for LangChain compatibility
+         class LLMWrapper:
+             def __init__(self, adapter):
+                 self.adapter = adapter
+
+             def invoke(self, messages):
+                 # Convert LangChain messages to the format expected by the adapter
+                 if isinstance(messages, list):
+                     formatted_messages = []
+                     for msg in messages:
+                         if hasattr(msg, 'content'):
+                             role = "user" if msg.__class__.__name__ == "HumanMessage" else "assistant"
+                             formatted_messages.append({"role": role, "content": msg.content})
+                         else:
+                             formatted_messages.append({"role": "user", "content": str(msg)})
+                 else:
+                     formatted_messages = [{"role": "user", "content": str(messages)}]
+
+                 # Use the adapter to get response
+                 response = self.adapter.generate(formatted_messages)
+
+                 # Return a mock response object
+                 class MockResponse:
+                     def __init__(self, content):
+                         self.content = content
+
+                 return MockResponse(response.content)
+
+         self.llm = LLMWrapper(self.llm_adapter)
+
+         # Initialize pipeline manager early to load models
+         logger.info("🔄 Initializing pipeline manager and loading models...")
+         try:
+             self.pipeline_manager = PipelineManager(self.config)
+             logger.info("✅ Pipeline manager initialized and models loaded")
+         except Exception as e:
+             logger.error(f"❌ Failed to initialize pipeline manager: {e}")
+             traceback.print_exc()
+             raise RuntimeError(f"Pipeline manager initialization failed: {e}")
+
+         # Connect to vector store
+         logger.info("🔄 Connecting to vector store...")
+         try:
+             if not self.pipeline_manager.connect_vectorstore():
+                 logger.error("❌ Failed to connect to vector store")
+                 logger.error("💡 Check that QDRANT_API_KEY environment variable is set")
+                 logger.error("💡 Check that Qdrant URL and collection name are correct in config")
+                 raise RuntimeError("Vector store connection failed")
+             logger.info("✅ Vector store connected successfully")
+         except RuntimeError:
+             raise  # Re-raise RuntimeError as-is
+         except Exception as e:
+             logger.error(f"❌ Error during vector store connection: {e}")
+             traceback.print_exc()
+             raise RuntimeError(f"Vector store connection failed: {e}")
+
+         # Load dynamic data
+         self._load_dynamic_data()
+
+         # Build the multi-agent graph
+         self.graph = self._build_graph()
+
+         # Conversations directory - use PROJECT_DIR for local vs deployed compatibility
+         self.conversations_dir = CONVERSATIONS_DIR
+         try:
+             # Use 777 permissions for maximum compatibility (HF Spaces runs as different user)
+             self.conversations_dir.mkdir(parents=True, mode=0o777, exist_ok=True)
+         except (PermissionError, OSError) as e:
+             logger.warning(f"Could not create conversations directory at {self.conversations_dir}: {e}")
+             # Fall back to a relative path (current directory)
+             self.conversations_dir = Path("conversations")
+             try:
+                 self.conversations_dir.mkdir(parents=True, mode=0o777, exist_ok=True)
+             except (PermissionError, OSError) as e2:
+                 logger.error(f"Could not create conversations directory at {self.conversations_dir}: {e2}")
+                 raise RuntimeError(f"Failed to create conversations directory: {e2}")
+
+         logger.info("🤖 Multi-Agent RAG Chatbot initialized")
+
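+     # Minimal usage sketch (hypothetical; the surrounding app is expected to build
+     # a MultiAgentState and invoke the compiled graph):
+     #
+     #   bot = MultiAgentRAGChatbot()
+     #   state: MultiAgentState = {
+     #       "conversation_id": "demo", "messages": [HumanMessage(content="Hi")],
+     #       "current_query": "Hi", "query_context": None, "rag_query": None,
+     #       "rag_filters": None, "retrieved_documents": None, "final_response": None,
+     #       "agent_logs": [], "conversation_context": {},
+     #       "session_start_time": time.time(), "last_ai_message_time": time.time(),
+     #   }
+     #   final_state = bot.graph.invoke(state)
+     #   print(final_state["final_response"])
+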
+     def _load_dynamic_data(self):
+         """Load dynamic data from filter_options.json and add_district_metadata.py"""
+         # Load filter options - use PROJECT_DIR relative path
+         try:
+             fo = PROJECT_DIR / "src" / "config" / "filter_options.json"
+             if fo.exists():
+                 with open(fo) as f:
+                     data = json.load(f)
+                 self.year_whitelist = [str(y).strip() for y in data.get("years", [])]
+                 self.source_whitelist = [str(s).strip() for s in data.get("sources", [])]
+                 self.district_whitelist = [str(d).strip() for d in data.get("districts", [])]
+             else:
+                 # Fallback to default values
+                 self.year_whitelist = ['2018', '2019', '2020', '2021', '2022', '2023', '2024']
+                 self.source_whitelist = ['Consolidated', 'Local Government', 'Ministry, Department and Agency']
+                 self.district_whitelist = ['Kampala', 'Gulu', 'Kalangala']
+         except Exception as e:
+             logger.warning(f"Could not load filter options: {e}")
+             self.year_whitelist = ['2018', '2019', '2020', '2021', '2022', '2023', '2024']
+             self.source_whitelist = ['Consolidated', 'Local Government', 'Ministry, Department and Agency']
+             self.district_whitelist = ['Kampala', 'Gulu', 'Kalangala']
+
+         # Enrich district list from add_district_metadata.py (if available)
+         try:
+             from add_district_metadata import DistrictMetadataProcessor
+             proc = DistrictMetadataProcessor()
+             names = set()
+             for key, mapping in proc.district_mappings.items():
+                 if getattr(mapping, 'is_district', True):
+                     names.add(mapping.name)
+             if names:
+                 merged = list(self.district_whitelist)
+                 for n in sorted(names):
+                     if n not in merged:
+                         merged.append(n)
+                 self.district_whitelist = merged
+                 logger.info(f"🧭 District whitelist enriched: {len(self.district_whitelist)} entries")
+         except Exception as e:
+             logger.info(f"ℹ️ Could not enrich districts: {e}")
+
+         # Calculate current year dynamically
+         self.current_year = str(datetime.now().year)
+         self.previous_year = str(datetime.now().year - 1)
+
+         # Log the actual filter values for debugging
+         logger.info("📊 ACTUAL FILTER VALUES:")
+         logger.info(f"   Years: {self.year_whitelist}")
+         logger.info(f"   Sources: {self.source_whitelist}")
+         logger.info(f"   Districts: {len(self.district_whitelist)} districts (first 10: {self.district_whitelist[:10]})")
+
+     def _normalize_district_name(self, district: str) -> Optional[str]:
+         """Normalize district name with fuzzy matching for common misspellings."""
+         if not district:
+             return None
+
+         district = district.strip()
+
+         # Direct match
+         if district in self.district_whitelist:
+             return district
+
+         # Remove "District" suffix
+         district_name = district.replace(" District", "").replace(" district", "").strip()
+         if district_name in self.district_whitelist:
+             return district_name
+
+         # Common misspellings mapping. Keys must be lowercase: the lookup below
+         # compares against district_name.lower(), so mixed-case keys would never match.
+         misspelling_map = {
+             "kalagala": "Kalangala",
+             "kalangala": "Kalangala",
+             "gulu": "Gulu",
+             "kampala": "Kampala",
+         }
+
+         # Check misspelling map (case-insensitive)
+         district_lower = district_name.lower()
+         if district_lower in misspelling_map:
+             corrected = misspelling_map[district_lower]
+             if corrected in self.district_whitelist:
+                 return corrected
+
+         # Fuzzy matching for similar names (simple Levenshtein-like check)
+         # Check if the district name is very similar to any whitelist entry
+         for whitelist_district in self.district_whitelist:
+             # Case-insensitive comparison
+             if district_name.lower() == whitelist_district.lower():
+                 return whitelist_district
+
+             # Check if one is a substring of the other (for partial matches)
+             if len(district_name) >= 4 and len(whitelist_district) >= 4:
+                 if district_name.lower() in whitelist_district.lower() or whitelist_district.lower() in district_name.lower():
+                     # Only return if it's a strong match (at least 80% of characters match)
+                     min_len = min(len(district_name), len(whitelist_district))
+                     max_len = max(len(district_name), len(whitelist_district))
+                     if min_len / max_len >= 0.8:
+                         return whitelist_district
+
+         return None
+
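+     # Illustrative behavior (assuming "Kalangala" and "Kampala" are in the whitelist):
+     #   _normalize_district_name("Kalagala District") -> "Kalangala"   # suffix stripped, misspelling mapped
+     #   _normalize_district_name("KAMPALA")           -> "Kampala"     # case-insensitive whitelist match
+     #   _normalize_district_name("Atlantis")          -> None          # no match
+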
+     def _build_graph(self) -> StateGraph:
+         """Build the multi-agent LangGraph"""
+         graph = StateGraph(MultiAgentState)
+
+         # Add nodes for each agent
+         graph.add_node("main_agent", self._main_agent)
+         graph.add_node("rag_agent", self._rag_agent)
+         graph.add_node("response_agent", self._response_agent)
+
+         # Define the flow
+         graph.set_entry_point("main_agent")
+
+         # Main agent decides next step
+         graph.add_conditional_edges(
+             "main_agent",
+             self._should_call_rag,
+             {
+                 "follow_up": END,
+                 "call_rag": "rag_agent"
+             }
+         )
+
+         # RAG agent calls response agent
+         graph.add_edge("rag_agent", "response_agent")
+
+         # Response agent returns to main agent for potential follow-ups
+         graph.add_edge("response_agent", "main_agent")
+
+         return graph.compile()
+
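+     # Resulting topology (sketch):
+     #
+     #   main_agent --"follow_up"--> END
+     #   main_agent --"call_rag"---> rag_agent --> response_agent --> main_agent
+     #
+     # The second visit to main_agent sees final_response already set, so
+     # _should_call_rag routes to END, which prevents an infinite loop.
+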
+     def _should_call_rag(self, state: MultiAgentState) -> str:
+         """Determine if we should call RAG or ask follow-up"""
+         # If we already have a final response (from response agent), end
+         if state.get("final_response"):
+             return "follow_up"
+
+         context = state["query_context"]
+         if context and context.needs_follow_up:
+             return "follow_up"
+         return "call_rag"
+
+     def _main_agent(self, state: MultiAgentState) -> MultiAgentState:
+         """Main Agent: Handles conversation flow and follow-ups"""
+         logger.info("🎯 MAIN AGENT: Starting analysis")
+
+         # If we already have a final response from response agent, end gracefully
+         if state.get("final_response"):
+             logger.info("🎯 MAIN AGENT: Final response already exists, ending conversation flow")
+             return state
+
+         query = state["current_query"]
+         messages = state["messages"]
+
+         logger.info("🎯 MAIN AGENT: Extracting UI filters from query")
+         ui_filters = self._extract_ui_filters(query)
+         logger.info(f"🎯 MAIN AGENT: UI filters extracted: {ui_filters}")
+
+         # Analyze query context
+         logger.info("🎯 MAIN AGENT: Analyzing query context")
+         context = self._analyze_query_context(query, messages, ui_filters)
+
+         # Log agent decision
+         state["agent_logs"].append(f"MAIN AGENT: Context analyzed - district={context.has_district}, source={context.has_source}, year={context.has_year}")
+         logger.info(f"🎯 MAIN AGENT: Context analysis complete - district={context.has_district}, source={context.has_source}, year={context.has_year}")
+
+         # Store context
+         state["query_context"] = context
+
+         # If follow-up needed, generate response
+         if context.needs_follow_up:
+             logger.info("🎯 MAIN AGENT: Follow-up needed, generating question")
+             response = context.follow_up_question
+             state["final_response"] = response
+             state["last_ai_message_time"] = time.time()
+             logger.info(f"🎯 MAIN AGENT: Follow-up question generated: {response[:100]}...")
+         else:
+             logger.info("🎯 MAIN AGENT: No follow-up needed, proceeding to RAG")
+
+         return state
+
+     def _rag_agent(self, state: MultiAgentState) -> MultiAgentState:
+         """RAG Agent: Rewrites queries and applies filters"""
+         logger.info("🔍 RAG AGENT: Starting query rewriting and filter preparation")
+
+         context = state["query_context"]
+         messages = state["messages"]
+
+         logger.info(f"🔍 RAG AGENT: Context received - district={context.has_district}, source={context.has_source}, year={context.has_year}")
+
+         # Rewrite query for RAG
+         logger.info("🔍 RAG AGENT: Rewriting query for optimal retrieval")
+         rag_query = self._rewrite_query_for_rag(messages, context)
+         logger.info(f"🔍 RAG AGENT: Query rewritten: '{rag_query}'")
+
+         # Build filters
+         logger.info("🔍 RAG AGENT: Building filters from context")
+         filters = self._build_filters(context)
+         logger.info(f"🔍 RAG AGENT: Filters built: {filters}")
+
+         # Log RAG preparation
+         state["agent_logs"].append(f"RAG AGENT: Query='{rag_query}', Filters={filters}")
+
+         # Store for response agent
+         state["rag_query"] = rag_query
+         state["rag_filters"] = filters
+
+         logger.info("🔍 RAG AGENT: Preparation complete, ready for retrieval")
+
+         return state
+
+     def _response_agent(self, state: MultiAgentState) -> MultiAgentState:
+         """Response Agent: Generates final answer from retrieved documents"""
+         logger.info("📝 RESPONSE AGENT: Starting document retrieval and answer generation")
+
+         rag_query = state["rag_query"]
+         filters = state["rag_filters"]
+
+         logger.info(f"📝 RESPONSE AGENT: Starting RAG retrieval with query: '{rag_query}'")
+         logger.info(f"📝 RESPONSE AGENT: Using filters: {filters}")
+
+         # Perform RAG retrieval
+         logger.info("📝 RESPONSE AGENT: Calling pipeline manager for retrieval")
+         logger.info(f"🔍 ACTUAL RAG QUERY: '{rag_query}'")
+         logger.info(f"🔍 ACTUAL FILTERS: {filters}")
+         try:
+             # Extract filenames from filters if present
+             filenames = filters.get("filenames") if filters else None
+
+             result = self.pipeline_manager.run(
+                 query=rag_query,
+                 sources=filters.get("sources") if filters else None,
+                 auto_infer_filters=False,
+                 filters=filters if filters else None
+             )
+
+             logger.info(f"📝 RESPONSE AGENT: RAG retrieval completed - {len(result.sources)} documents retrieved")
+             logger.info(f"🔍 RETRIEVAL DEBUG: Result type: {type(result)}")
+             logger.info(f"🔍 RETRIEVAL DEBUG: Result sources type: {type(result.sources)}")
+             # logger.info(f"🔍 RETRIEVAL DEBUG: Result metadata: {getattr(result, 'metadata', 'No metadata')}")
+
+             if len(result.sources) == 0:
+                 logger.warning(f"⚠️ NO DOCUMENTS RETRIEVED: Query='{rag_query}', Filters={filters}")
+                 logger.warning("⚠️ RETRIEVAL DEBUG: This could be due to:")
+                 logger.warning("   - Query too specific for available documents")
+                 logger.warning("   - Filters too restrictive")
+                 logger.warning("   - Vector store connection issues")
+                 logger.warning("   - Embedding model issues")
+             else:
+                 logger.info(f"✅ DOCUMENTS RETRIEVED: {len(result.sources)} documents found")
+                 for i, doc in enumerate(result.sources[:3]):  # Log first 3 docs
+                     logger.info(f"   Doc {i+1}: {getattr(doc, 'metadata', {}).get('filename', 'Unknown')[:50]}...")
+
+             state["retrieved_documents"] = result.sources
+             state["agent_logs"].append(f"RESPONSE AGENT: Retrieved {len(result.sources)} documents")
+
+             # Check highest similarity score
+             highest_score = 0.0
+             if result.sources:
+                 # Check reranked_score first (more accurate), fall back to original_score
+                 for doc in result.sources:
+                     score = doc.metadata.get('reranked_score') or doc.metadata.get('original_score', 0.0)
+                     if score > highest_score:
+                         highest_score = score
+
+             logger.info(f"📝 RESPONSE AGENT: Highest similarity score: {highest_score:.4f}")
+
+             # If highest score is too low, don't use retrieved documents
+             if highest_score <= 0.15:
+                 logger.warning(f"⚠️ RESPONSE AGENT: Low similarity score ({highest_score:.4f} <= 0.15), using LLM knowledge only")
+                 response = self._generate_conversational_response_without_docs(
+                     state["current_query"],
+                     state["messages"]
+                 )
+             else:
+                 # Generate conversational response with documents
+                 logger.info(f"📝 RESPONSE AGENT: Generating conversational response from {len(result.sources)} documents")
+                 response = self._generate_conversational_response(
+                     state["current_query"],
+                     result.sources,
+                     result.answer,
+                     state["messages"]
+                 )
+
+             logger.info(f"📝 RESPONSE AGENT: Response generated: {response[:100]}...")
+
+             state["final_response"] = response
+             state["last_ai_message_time"] = time.time()
+
+             logger.info("📝 RESPONSE AGENT: Answer generation complete")
+
+         except Exception as e:
+             logger.error(f"❌ RESPONSE AGENT ERROR: {e}")
+             state["final_response"] = "I apologize, but I encountered an error while retrieving information. Please try again."
+             state["last_ai_message_time"] = time.time()
+
+         return state
+
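+     # Note: the 0.15 similarity cutoff above is a heuristic chosen in this file; scores
+     # are read from doc.metadata["reranked_score"] when present, else "original_score".
+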
+     def _extract_ui_filters(self, query: str) -> Dict[str, List[str]]:
+         """Extract UI filters from query"""
+         filters = {}
+
+         # Look for FILTER CONTEXT in query
+         if "FILTER CONTEXT:" in query:
+             # Extract the entire filter section (until USER QUERY: or end of query)
+             filter_section = query.split("FILTER CONTEXT:")[1]
+             if "USER QUERY:" in filter_section:
+                 filter_section = filter_section.split("USER QUERY:")[0]
+             filter_section = filter_section.strip()
+
+             # Parse sources
+             if "Sources:" in filter_section:
+                 sources_line = [line for line in filter_section.split('\n') if line.strip().startswith('Sources:')][0]
+                 sources_str = sources_line.split("Sources:")[1].strip()
+                 if sources_str and sources_str != "None":
+                     filters["sources"] = [s.strip() for s in sources_str.split(",")]
+
+             # Parse years
+             if "Years:" in filter_section:
+                 years_line = [line for line in filter_section.split('\n') if line.strip().startswith('Years:')][0]
+                 years_str = years_line.split("Years:")[1].strip()
+                 if years_str and years_str != "None":
+                     filters["years"] = [y.strip() for y in years_str.split(",")]
+
+             # Parse districts
+             if "Districts:" in filter_section:
+                 districts_line = [line for line in filter_section.split('\n') if line.strip().startswith('Districts:')][0]
+                 districts_str = districts_line.split("Districts:")[1].strip()
+                 if districts_str and districts_str != "None":
+                     filters["districts"] = [d.strip() for d in districts_str.split(",")]
+
+             # Parse filenames
+             if "Filenames:" in filter_section:
+                 filenames_line = [line for line in filter_section.split('\n') if line.strip().startswith('Filenames:')][0]
+                 filenames_str = filenames_line.split("Filenames:")[1].strip()
+                 if filenames_str and filenames_str != "None":
+                     filters["filenames"] = [f.strip() for f in filenames_str.split(",")]
+
+         return filters
+
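+     # Shape of the query this parser expects (illustrative; the UI layer is assumed
+     # to prepend this block to the user's message):
+     #
+     #   FILTER CONTEXT:
+     #   Sources: Local Government
+     #   Years: 2022, 2023
+     #   Districts: Kampala
+     #   Filenames: None
+     #   USER QUERY: How were PDM funds used?
+     #
+     # -> {"sources": ["Local Government"], "years": ["2022", "2023"], "districts": ["Kampala"]}
+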
+     def _analyze_query_context(self, query: str, messages: List[Any], ui_filters: Dict[str, List[str]]) -> QueryContext:
+         """Analyze query context using LLM"""
+         logger.info(f"🔍 QUERY ANALYSIS: '{query[:50]}...' | UI filters: {ui_filters} | Messages: {len(messages)}")
+
+         # Build conversation context
+         conversation_context = ""
+         for i, msg in enumerate(messages[-6:]):  # Last 6 messages
+             if isinstance(msg, HumanMessage):
+                 conversation_context += f"User: {msg.content}\n"
+             elif isinstance(msg, AIMessage):
+                 conversation_context += f"Assistant: {msg.content}\n"
+
+         # Create analysis prompt
+         analysis_prompt = ChatPromptTemplate.from_messages([
+             SystemMessage(content=f"""You are the Main Agent in an advanced multi-agent RAG system for audit report analysis.
+ 🎯 PRIMARY GOAL: Intelligently analyze user queries and determine the optimal conversation flow, whether that's answering directly, asking follow-ups, or proceeding to RAG retrieval.
+ 🧠 INTELLIGENCE LEVEL: You are a sophisticated conversational AI that can handle any type of user interaction - from greetings to complex audit queries.
+ 📊 YOUR EXPERTISE: You specialize in analyzing audit reports from various sources (Local Government, Ministry, Hospital, etc.) across different years and districts in Uganda.
+ 🔍 AVAILABLE FILTERS:
+ - Years: {', '.join(self.year_whitelist)}
+ - Current year: {self.current_year}, Previous year: {self.previous_year}
+ - Sources: {', '.join(self.source_whitelist)}
+ - Districts: {', '.join(self.district_whitelist[:50])}... (and {len(self.district_whitelist)-50} more)
+ 🎛️ UI FILTERS PROVIDED: {ui_filters}
+ 📋 UI FILTER HANDLING:
+ - If UI filters contain multiple values (e.g., districts: ['Lwengo', 'Kiboga']), extract ALL values
+ - For multiple districts: extract each district separately and validate each one
+ - For multiple years: extract each year separately and validate each one
+ - For multiple sources: extract each source separately and validate each one
+ - UI filters take PRIORITY over conversation context - use them first
+ 🧭 CONVERSATION FLOW INTELLIGENCE:
+ 1. **GREETINGS & GENERAL CHAT**:
+    - If user greets you ("Hi", "Hello", "How are you"), respond warmly and guide them to audit-related questions
+    - Example: "Hello! I'm here to help you analyze audit reports. What would you like to know about budget allocations, expenditures, or audit findings?"
+ 2. **EDGE CASES**:
+    - Handle "What can you do?", "Help", "I don't know what to ask" with helpful guidance
+    - Example: "I can help you analyze audit reports! Try asking about budget allocations, salary management, PDM implementation, or any specific audit findings."
+ 3. **AUDIT QUERIES**:
+    - Extract ONLY values that EXACTLY match the available lists above
+    - DO NOT hallucinate or infer values not in the lists
+    - If user mentions "salary payroll management" - this is NOT a valid source filter
+
+    **YEAR EXTRACTION**:
+    - If user mentions "2023" and it's in the years list - extract "2023"
+    - If user mentions "2022 / 23" - extract ["2022", "2023"] (as a JSON array)
+    - If user mentions "2022-2023" - extract ["2022", "2023"] (as a JSON array)
+    - If user mentions "latest couple of years" - extract the 2 most recent years from available data as JSON array
+    - Always return years as JSON arrays when multiple years are mentioned
+
+    **DISTRICT EXTRACTION**:
+    - If user mentions "Kampala" and it's in the districts list - extract "Kampala"
+    - If user mentions "Pader District" - extract "Pader" (remove "District" suffix)
+    - If user mentions "Lwengo, Kiboga and Namutumba" - extract ["Lwengo", "Kiboga", "Namutumba"] (as JSON array)
+    - If user mentions "Lwengo District and Kiboga District" - extract ["Lwengo", "Kiboga"] (as JSON array, remove "District" suffix)
+    - Always return districts as JSON arrays when multiple districts are mentioned
+    - **COMMON MISSPELLINGS**: Handle common misspellings intelligently:
+      * "Kalagala" (missing 'n') should be extracted as "Kalangala"
+      * "kalagala", "Kalagala", "KALAGALA" should all be normalized to "Kalangala"
+      * Similar case-insensitive variations should be normalized to the correct district name
+    - If no exact matches found, set extracted values to null
+ 4. **FILENAME FILTERING (MUTUALLY EXCLUSIVE)**:
+    - If UI provides filenames filter - ONLY use that, ignore all other filters (year, district, source)
+    - With filenames filter, no follow-ups needed - proceed directly to RAG
+    - When filenames are specified, skip filter inference entirely
+ 5. **HALLUCINATION PREVENTION**:
+    - If user asks about a specific report but NO filename is selected in UI and NONE is extracted from conversation - DO NOT hallucinate
+    - Clearly state: "I don't have any specific report selected. Could you please select a report from the list or tell me which report you'd like to analyze?"
+    - DO NOT pretend to know which report they mean
+    - DO NOT infer reports from context alone - only use explicitly mentioned reports
+ 6. **CONVERSATION CONTEXT AWARENESS**:
+    - ALWAYS consider the full conversation context when extracting filters
+    - If district was mentioned in previous messages, include it in current analysis
+    - If year was mentioned in previous messages, include it in current analysis
+    - If source was mentioned in previous messages, include it in current analysis
+    - Example: If conversation shows "User: Tell me about Pader District" then "User: 2023", extract both: district="Pader" and year="2023"
+ 7. **SMART FOLLOW-UP STRATEGY**:
+    - NEVER ask the same question twice in a row
+    - If user provides source info, ask for year or district next
+    - If user provides year info, ask for source or district next
+    - If user provides district info, ask for year or source next
+    - If user provides 2+ pieces of info, proceed to RAG instead of asking more
+    - Make follow-ups conversational and contextual, not robotic
+ 8. **DYNAMIC FOLLOW-UP EXAMPLES**:
+    - Budget queries: "What year are you interested in?" or "Which department - Local Government or Ministry?"
+    - PDM queries: "Which district are you interested in?" or "What year?"
+    - General queries: "Could you be more specific about what you'd like to know?"
+ 🎯 DECISION LOGIC:
+ - If query is a greeting/general chat → needs_follow_up: true, provide helpful guidance
+ - If query has 2+ pieces of info → needs_follow_up: false, proceed to RAG
+ - If query has 1 piece of info → needs_follow_up: true, ask for missing piece
+ - If query has 0 pieces of info → needs_follow_up: true, ask for clarification
+ RESPOND WITH JSON ONLY:
+ {{
+     "has_district": boolean,
+     "has_source": boolean,
+     "has_year": boolean,
+     "extracted_district": "single district name or JSON array of districts or null",
+     "extracted_source": "single source name or JSON array of sources or null",
+     "extracted_year": "single year or JSON array of years or null",
+     "confidence_score": 0.0-1.0,
+     "needs_follow_up": boolean,
+     "follow_up_question": "conversational question or helpful guidance or null"
+ }}"""),
+             HumanMessage(content=f"""Query: {query}
+ Conversation Context:
+ {conversation_context}
+ CRITICAL: You MUST analyze the FULL conversation context above, not just the current query.
+ - If ANY district was mentioned in previous messages, extract it
+ - If ANY year was mentioned in previous messages, extract it
+ - If ANY source was mentioned in previous messages, extract it
+ - Combine information from ALL messages in the conversation
+ Analyze this query using ONLY the exact values provided above:""")
+         ])
+
+         try:
+             response = self.llm.invoke(analysis_prompt.format_messages())
+
+             # Clean the response to extract JSON
+             content = response.content.strip()
+             if content.startswith("```json"):
+                 # Remove markdown formatting
+                 content = content.replace("```json", "").replace("```", "").strip()
+             elif content.startswith("```"):
+                 # Remove generic markdown formatting
+                 content = content.replace("```", "").strip()
+
+             # Clean and parse JSON with better error handling
+             try:
+                 # Remove comments (// and /* */) from JSON
+                 # Remove single-line comments
+                 content = re.sub(r'//.*?$', '', content, flags=re.MULTILINE)
+                 # Remove multi-line comments
+                 content = re.sub(r'/\*.*?\*/', '', content, flags=re.DOTALL)
+
+                 analysis = json.loads(content)
+                 logger.info("🔍 QUERY ANALYSIS: ✅ Parsed successfully")
+             except json.JSONDecodeError as e:
+                 logger.error(f"❌ JSON parsing failed: {e}")
+                 logger.error(f"❌ Raw content: {content[:200]}...")
+
+                 # Try to extract JSON from text if embedded
+                 json_match = re.search(r'\{.*\}', content, re.DOTALL)
+                 if json_match:
+                     try:
+                         # Clean the extracted JSON
+                         cleaned_json = json_match.group()
+                         cleaned_json = re.sub(r'//.*?$', '', cleaned_json, flags=re.MULTILINE)
+                         cleaned_json = re.sub(r'/\*.*?\*/', '', cleaned_json, flags=re.DOTALL)
+                         analysis = json.loads(cleaned_json)
+                         logger.info("🔍 QUERY ANALYSIS: ✅ Extracted and cleaned JSON from text")
+                     except json.JSONDecodeError as e2:
+                         logger.error(f"❌ Failed to extract JSON from text: {e2}")
+                         # Return fallback context
+                         context = QueryContext(
+                             has_district=False,
+                             has_source=False,
+                             has_year=False,
+                             extracted_district=None,
+                             extracted_source=None,
+                             extracted_year=None,
+                             confidence_score=0.0,
+                             needs_follow_up=True,
+                             follow_up_question="I apologize, but I'm having trouble processing your request. Could you please rephrase it or ask for help?"
+                         )
+                         return context
+                 else:
+                     # Return fallback context
+                     context = QueryContext(
+                         has_district=False,
+                         has_source=False,
+                         has_year=False,
+                         extracted_district=None,
+                         extracted_source=None,
+                         extracted_year=None,
+                         confidence_score=0.0,
+                         needs_follow_up=True,
+                         follow_up_question="I apologize, but I'm having trouble processing your request. Could you please rephrase it or ask for help?"
+                     )
+                     return context
+
+             # Validate extracted values against whitelists
+             extracted_district = analysis.get("extracted_district")
+             extracted_source = analysis.get("extracted_source")
+             extracted_year = analysis.get("extracted_year")
+
+             logger.info(f"🔍 QUERY ANALYSIS: Raw extracted values - district: {extracted_district}, source: {extracted_source}, year: {extracted_year}")
+
+             # Validate district (handle both single values and arrays)
+             if extracted_district:
+                 if isinstance(extracted_district, list):
+                     # Validate each district in the array
+                     valid_districts = []
+                     for district in extracted_district:
+                         normalized = self._normalize_district_name(district)
+                         if normalized:
+                             valid_districts.append(normalized)
+
+                     if valid_districts:
+                         extracted_district = valid_districts[0] if len(valid_districts) == 1 else valid_districts
+                         logger.info(f"🔍 QUERY ANALYSIS: Extracted districts: {extracted_district}")
+                     else:
+                         logger.warning(f"⚠️ No valid districts found in: '{extracted_district}'")
+                         extracted_district = None
+                 else:
+                     # Single district validation with fuzzy matching
+                     normalized = self._normalize_district_name(extracted_district)
+                     if normalized:
+                         if normalized != extracted_district:
+                             logger.info(f"🔍 QUERY ANALYSIS: Normalized district '{extracted_district}' to '{normalized}'")
+                         extracted_district = normalized
+                     else:
+                         logger.warning(f"⚠️ Invalid district extracted: '{extracted_district}' not in whitelist")
+                         extracted_district = None
+
+             # Validate source (handle both single values and arrays)
+             if extracted_source:
+                 if isinstance(extracted_source, list):
+                     # Validate each source in the array
+                     valid_sources = []
+                     for source in extracted_source:
+                         if source in self.source_whitelist:
+                             valid_sources.append(source)
+                         else:
+                             logger.warning(f"⚠️ Invalid source in array: '{source}' not in whitelist")
+
+                     if valid_sources:
+                         extracted_source = valid_sources[0] if len(valid_sources) == 1 else valid_sources
+                         logger.info(f"🔍 QUERY ANALYSIS: Extracted sources: {extracted_source}")
+                     else:
+                         logger.warning(f"⚠️ No valid sources found in: '{extracted_source}'")
+                         extracted_source = None
+                 else:
+                     # Single source validation
+                     if extracted_source not in self.source_whitelist:
+                         logger.warning(f"⚠️ Invalid source extracted: '{extracted_source}' not in whitelist")
+                         extracted_source = None
+
+             # Validate year (handle both single values and arrays)
+             if extracted_year:
+                 if isinstance(extracted_year, list):
+                     # Validate each year in the array
+                     valid_years = []
+                     for year in extracted_year:
+                         year_str = str(year)
+                         if year_str in self.year_whitelist:
+                             valid_years.append(year_str)
+
+                     if valid_years:
+                         extracted_year = valid_years[0] if len(valid_years) == 1 else valid_years
+                         logger.info(f"🔍 QUERY ANALYSIS: Extracted years: {extracted_year}")
+                     else:
+                         logger.warning(f"⚠️ No valid years found in: '{extracted_year}'")
+                         extracted_year = None
+                 else:
+                     # Single year validation
+                     year_str = str(extracted_year)
+                     if year_str not in self.year_whitelist:
+                         logger.warning(f"⚠️ Invalid year extracted: '{extracted_year}' not in whitelist")
+                         extracted_year = None
+                     else:
+                         extracted_year = year_str
+
+             logger.info(f"🔍 QUERY ANALYSIS: Validated values - district: {extracted_district}, source: {extracted_source}, year: {extracted_year}")
+
+             # Create QueryContext object
+             context = QueryContext(
+                 has_district=bool(extracted_district),
+                 has_source=bool(extracted_source),
+                 has_year=bool(extracted_year),
+                 extracted_district=extracted_district,
+                 extracted_source=extracted_source,
+                 extracted_year=extracted_year,
+                 ui_filters=ui_filters,
+                 confidence_score=analysis.get("confidence_score", 0.0),
+                 needs_follow_up=analysis.get("needs_follow_up", False),
+                 follow_up_question=analysis.get("follow_up_question")
+             )
+
+             logger.info(f"🔍 QUERY ANALYSIS: Analysis complete - needs_follow_up: {context.needs_follow_up}, confidence: {context.confidence_score}")
+
+             # If filenames are provided in UI, skip follow-ups and proceed to RAG
+             if ui_filters and ui_filters.get("filenames"):
+                 logger.info("🔍 QUERY ANALYSIS: Filenames provided, skipping follow-ups, proceeding to RAG")
+                 context.needs_follow_up = False
+                 context.follow_up_question = None
+
+             # Additional smart decision logic
+             if context.needs_follow_up:
+                 # Check if we have enough information to proceed
+                 info_count = sum([
+                     bool(context.extracted_district),
+                     bool(context.extracted_source),
+                     bool(context.extracted_year)
+                 ])
+
+                 # Check if user is asking for more info vs providing it
+                 query_lower = query.lower()
+                 is_requesting_info = any(phrase in query_lower for phrase in [
+                     "please provide", "could you provide", "can you provide",
+                     "what is", "what are", "how much", "which", "what year",
+                     "what district", "what source", "tell me about"
+                 ])
+
+                 # If we have 2+ pieces of info AND user is not requesting more info, proceed to RAG
+                 if info_count >= 2 and not is_requesting_info:
+                     logger.info(f"🔍 QUERY ANALYSIS: Smart override - have {info_count} pieces of info and user not requesting more, proceeding to RAG")
+                     context.needs_follow_up = False
+                     context.follow_up_question = None
+                 elif info_count >= 2 and is_requesting_info:
+                     logger.info(f"🔍 QUERY ANALYSIS: User requesting more info despite having {info_count} pieces, proceeding to RAG with comprehensive answer")
+                     context.needs_follow_up = False
+                     context.follow_up_question = None
+
+             return context
+
+         except Exception as e:
+             logger.error(f"❌ Query analysis failed: {e}")
+             # Fallback: proceed with RAG
+             return QueryContext(
+                 has_district=bool(ui_filters.get("districts")),
+                 has_source=bool(ui_filters.get("sources")),
+                 has_year=bool(ui_filters.get("years")),
+                 ui_filters=ui_filters,
+                 confidence_score=0.5,
+                 needs_follow_up=False
+             )
+
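+     # Illustrative JSON the analysis LLM is expected to return (schema from the prompt above):
+     #   {"has_district": true, "has_source": false, "has_year": true,
+     #    "extracted_district": "Kampala", "extracted_source": null,
+     #    "extracted_year": ["2022", "2023"], "confidence_score": 0.9,
+     #    "needs_follow_up": false, "follow_up_question": null}
+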
+     def _rewrite_query_for_rag(self, messages: List[Any], context: QueryContext) -> str:
+         """Rewrite query for optimal RAG retrieval"""
+         logger.info("🔄 QUERY REWRITING: Starting query rewrite for RAG")
+         logger.info(f"🔄 QUERY REWRITING: Processing {len(messages)} messages")
+
+         # Build conversation context
+         logger.info("🔄 QUERY REWRITING: Building conversation context from last 6 messages")
+         conversation_lines = []
+         for i, msg in enumerate(messages[-6:]):
+             if isinstance(msg, HumanMessage):
+                 conversation_lines.append(f"User: {msg.content}")
+                 logger.info(f"🔄 QUERY REWRITING: Message {i+1}: User - {msg.content[:50]}...")
+             elif isinstance(msg, AIMessage):
+                 conversation_lines.append(f"Assistant: {msg.content}")
+                 logger.info(f"🔄 QUERY REWRITING: Message {i+1}: Assistant - {msg.content[:50]}...")
+
+         convo_text = "\n".join(conversation_lines)
+         logger.info(f"🔄 QUERY REWRITING: Conversation context built ({len(convo_text)} chars)")
+
+         # Create rewrite prompt
+         rewrite_prompt = ChatPromptTemplate.from_messages([
+             SystemMessage(content="""You are a query rewriter for RAG retrieval.
+ GOAL: Create the best possible search query for document retrieval.
+ CRITICAL RULES:
+ 1. Focus on the core information need from the conversation
+ 2. Remove meta-verbs like "summarize", "list", "compare", "how much", "what" - keep the content focus
+ 3. DO NOT include filter details (years, districts, sources) - these are applied separately as filters
+ 4. DO NOT include specific years, district names, or source types in the query
+ 5. Output ONE clear sentence suitable for vector search
+ 6. Keep it generic and focused on the topic/subject matter
+ EXAMPLES:
+ - "What are the top challenges in budget allocation?" → "budget allocation challenges"
+ - "How were PDM administrative costs utilized in 2023?" → "PDM administrative costs utilization"
+ - "Compare salary management across districts" → "salary management"
+ - "How much was budget allocation for Local Government in 2023?" → "budget allocation"
+ OUTPUT FORMAT:
+ Provide your response in this exact format:
+ EXPLANATION: [Your reasoning here]
+ QUERY: [One clean sentence for retrieval]
+ The QUERY line will be extracted and used directly for RAG retrieval."""),
+             HumanMessage(content=f"""Conversation:
+ {convo_text}
+ Rewrite the best retrieval query:""")
+         ])
+
+         try:
+             logger.info("🔄 QUERY REWRITING: Calling LLM for query rewrite")
+             response = self.llm.invoke(rewrite_prompt.format_messages())
+             logger.info(f"🔄 QUERY REWRITING: LLM response received: {response.content[:100]}...")
+
+             rewritten = response.content.strip()
+
+             # Extract only the QUERY line from the structured response
+             lines = rewritten.split('\n')
+             query_line = None
+             for line in lines:
+                 if line.strip().startswith('QUERY:'):
+                     query_line = line.replace('QUERY:', '').strip()
+                     break
+
+             if query_line and len(query_line) > 5:
+                 logger.info(f"🔄 QUERY REWRITING: Query rewritten successfully: '{query_line[:50]}...'")
+                 return query_line
+             else:
+                 logger.info("🔄 QUERY REWRITING: No QUERY line found or too short, using fallback")
+                 # Fallback to last user message
+                 for msg in reversed(messages):
+                     if isinstance(msg, HumanMessage):
+                         logger.info(f"🔄 QUERY REWRITING: Using fallback message: '{msg.content[:50]}...'")
+                         return msg.content
+                 logger.info("🔄 QUERY REWRITING: Using default fallback")
+                 return "audit report information"
+
+         except Exception as e:
+             logger.error(f"❌ QUERY REWRITING: Error during rewrite: {e}")
+             # Fallback
+             for msg in reversed(messages):
+                 if isinstance(msg, HumanMessage):
+                     logger.info(f"🔄 QUERY REWRITING: Using error fallback message: '{msg.content[:50]}...'")
+                     return msg.content
+             logger.info("🔄 QUERY REWRITING: Using default error fallback")
+             return "audit report information"
+
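+     # Illustrative LLM output this parser handles (format prescribed by the prompt above):
+     #   EXPLANATION: The user wants PDM cost details; year/district go into filters.
+     #   QUERY: PDM administrative costs utilization
+     # Only the QUERY line is kept and sent to retrieval.
+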
+     def _build_filters(self, context: QueryContext) -> Dict[str, Any]:
+         """Build filters for RAG retrieval"""
+         logger.info("🔧 FILTER BUILDING: Starting filter construction")
+         filters = {}
+
+         # Check for filename filtering first (mutually exclusive)
+         if context.ui_filters and context.ui_filters.get("filenames"):
+             logger.info("🔧 FILTER BUILDING: Filename filtering requested (mutually exclusive mode)")
+             filters["filenames"] = context.ui_filters["filenames"]
+             logger.info(f"🔧 FILTER BUILDING: Added filenames filter: {context.ui_filters['filenames']}")
+             logger.info(f"🔧 FILTER BUILDING: Final filters: {filters}")
+             return filters  # Return early, skip all other filters
+
+         # UI filters take priority, but merge with extracted context if UI filters are incomplete
+         if context.ui_filters:
+             logger.info(f"🔧 FILTER BUILDING: UI filters present: {context.ui_filters}")
+
+             # Add UI filters first
+             if context.ui_filters.get("sources"):
+                 filters["sources"] = context.ui_filters["sources"]
+                 logger.info(f"🔧 FILTER BUILDING: Added sources filter from UI: {context.ui_filters['sources']}")
+
+             if context.ui_filters.get("years"):
+                 filters["year"] = context.ui_filters["years"]
+                 logger.info(f"🔧 FILTER BUILDING: Added years filter from UI: {context.ui_filters['years']}")
+
+             if context.ui_filters.get("districts"):
+                 # Normalize district names to title case (match Qdrant metadata format)
+                 normalized_districts = [d.title() for d in context.ui_filters['districts']]
+                 filters["district"] = normalized_districts
+                 logger.info(f"🔧 FILTER BUILDING: Added districts filter from UI: {context.ui_filters['districts']} → normalized: {normalized_districts}")
+
+             # Merge with extracted context for missing filters
+             if not filters.get("district") and context.extracted_district:
+                 # Normalize district names using the normalization function
+                 if isinstance(context.extracted_district, list):
+                     normalized_districts = []
+                     for d in context.extracted_district:
+                         normalized = self._normalize_district_name(d)
+                         if normalized:
+                             normalized_districts.append(normalized)
+                     if normalized_districts:
+                         filters["district"] = normalized_districts
+                         logger.info(f"🔧 FILTER BUILDING: Added districts filter from context: {context.extracted_district} → normalized: {normalized_districts}")
+                 else:
+                     normalized = self._normalize_district_name(context.extracted_district)
+                     if normalized:
+                         filters["district"] = [normalized]
+                         logger.info(f"🔧 FILTER BUILDING: Added district filter from context: {context.extracted_district} → normalized: {normalized}")
+
+             if not filters.get("year") and context.extracted_year:
+                 # Handle both single values and arrays
+                 if isinstance(context.extracted_year, list):
+                     filters["year"] = context.extracted_year
+                 else:
+                     filters["year"] = [context.extracted_year]
+                 logger.info(f"🔧 FILTER BUILDING: Added extracted year filter (UI missing): {context.extracted_year}")
+
+             if not filters.get("sources") and context.extracted_source:
+                 # Handle both single values and arrays
+                 if isinstance(context.extracted_source, list):
+                     filters["sources"] = context.extracted_source
+                 else:
+                     filters["sources"] = [context.extracted_source]
+                 logger.info(f"🔧 FILTER BUILDING: Added extracted source filter (UI missing): {context.extracted_source}")
+         else:
+             logger.info("🔧 FILTER BUILDING: No UI filters, using extracted context")
+             # Use extracted context
+             if context.extracted_source:
+                 # Handle both single values and arrays
+                 if isinstance(context.extracted_source, list):
+                     filters["sources"] = context.extracted_source
+                 else:
+                     filters["sources"] = [context.extracted_source]
+                 logger.info(f"🔧 FILTER BUILDING: Added extracted source filter: {context.extracted_source}")
+
+             if context.extracted_year:
+                 # Handle both single values and arrays
+                 if isinstance(context.extracted_year, list):
+                     filters["year"] = context.extracted_year
+                 else:
+                     filters["year"] = [context.extracted_year]
+                 logger.info(f"🔧 FILTER BUILDING: Added extracted year filter: {context.extracted_year}")
+
+             if context.extracted_district:
+                 # Normalize district names using the normalization function
+                 if isinstance(context.extracted_district, list):
+                     normalized_districts = []
+                     for d in context.extracted_district:
+                         normalized = self._normalize_district_name(d)
+                         if normalized:
+                             normalized_districts.append(normalized)
+                     if normalized_districts:
+                         filters["district"] = normalized_districts
+                         logger.info(f"🔧 FILTER BUILDING: Added districts filter from context: {context.extracted_district} → normalized: {normalized_districts}")
+                 else:
+                     normalized = self._normalize_district_name(context.extracted_district)
+                     if normalized:
+                         filters["district"] = [normalized]
+                         logger.info(f"🔧 FILTER BUILDING: Added district filter from context: {context.extracted_district} → normalized: {normalized}")
+
+         logger.info(f"🔧 FILTER BUILDING: Final filters: {filters}")
+         return filters
+
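+     # Example filter shapes this method produces (key names mirror the code above;
+     # the pipeline's expectation of singular "year"/"district" keys is assumed):
+     #   {"filenames": ["report_2023.pdf"]}                                       # mutually exclusive mode
+     #   {"sources": ["Local Government"], "year": ["2023"], "district": ["Kampala"]}
+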
+     def _generate_conversational_response(self, query: str, documents: List[Any], rag_answer: str, messages: List[Any]) -> str:
+         """Generate conversational response from RAG results"""
+         logger.info("💬 RESPONSE GENERATION: Starting conversational response generation")
+         logger.info(f"💬 RESPONSE GENERATION: Processing {len(documents)} documents")
+         logger.info(f"💬 RESPONSE GENERATION: Query: '{query[:50]}...'")
+         logger.info(f"💬 RESPONSE GENERATION: Conversation history: {len(messages)} messages")
+
+         # Build conversation history context
+         conversation_context = self._build_conversation_context(messages)
+
+         # Build detailed document information
+         document_details = self._build_document_details(documents)
+
+         # Extract correct district/source/year names from documents (to correct misspellings)
+         correct_names = self._extract_correct_names_from_documents(documents)
+
+         # Create response prompt
+         logger.info("💬 RESPONSE GENERATION: Building response prompt")
+         response_prompt = ChatPromptTemplate.from_messages([
+             SystemMessage(content="""You are a helpful audit report assistant. Generate a natural, conversational response.
+ CRITICAL RULES - NO HALLUCINATION:
+ 1. **ONLY use information from the retrieved documents provided below**
+ 2. **EVERY sentence with facts, numbers, or specific claims MUST have a [Doc i] reference**
+ 3. **If a document doesn't contain the information, DO NOT make it up**
+ 4. **If the user asks about a year/district that's NOT in the retrieved documents, explicitly state that**
+ 5. **Check the document years/districts before making any claims about them**
+ 6. **USE CORRECT NAMES**: If the conversation mentions a misspelled district/source name (e.g., "Kalagala"), use the CORRECT spelling from the document metadata (e.g., "Kalangala"). Always use the exact names from document metadata, not misspellings from conversation.
+ RULES:
+ 1. Answer the user's question directly and clearly
+ 2. Use ONLY the retrieved documents as evidence - DO NOT use your training data
+ 3. Be conversational, not technical
+ 4. Don't mention scores, retrieval details, or technical implementation
+ 5. If relevant documents were found, reference them naturally
+ 6. If no relevant documents, say you do not have enough information - DO NOT hallucinate
+ 7. If the passages have useful facts or numbers, use them in your answer WITH references
+ 8. **MANDATORY**: When you use information from a passage, mention where it came from by using [Doc i] at the end of the sentence. i stands for the number of the document.
+ 9. Do not use the sentence 'Doc i says ...' to say where information came from.
+ 10. If the same thing is said in more than one document, you can mention all of them like this: [Doc i, Doc j, Doc k]
+ 11. Do not just summarize each passage one by one. Group your summaries to highlight the key parts in the explanation.
+ 12. If it makes sense, use bullet points and lists to make your answers easier to understand.
+ 13. You do not need to use every passage. Only use the ones that help answer the question.
+ 14. **VERIFY**: Before mentioning any year, district, or number, check that it exists in the retrieved documents. If it doesn't, say "I don't have information about [year/district] in the retrieved documents."
+ 15. **NO HALLUCINATION**: If documents show years 2021, 2022, 2023 but user asks about 2020, DO NOT provide 2020 data. Instead say "The retrieved documents cover 2021-2023, but I don't have information for 2020."
+ 16. **USE CORRECT SPELLING**: Always use the district/source names exactly as they appear in the document metadata below, even if the conversation history has misspellings.
+ TONE: Professional but friendly, like talking to a colleague."""),
+             HumanMessage(content=f"""Conversation History:
+ {conversation_context}
+ Current User Question: {query}
+ Retrieved Documents: {len(documents)} documents found
+ CORRECT NAMES TO USE (from document metadata - use these exact spellings):
+ {correct_names}
+ Full Document Details:
+ {document_details}
+ RAG Answer: {rag_answer}
+ CRITICAL:
+ - Responses must be grounded in what is available in the retrieved documents
+ - If the user asks about a specific year, district, or source but the documents cover different ones, explicitly state "can't provide response on ... because ..."
+ - Every factual claim MUST have a [Doc i] reference
+ - If information is not in the documents, explicitly state that it is not available
+ - **USE THE CORRECT DISTRICT/SOURCE NAMES from the document metadata above, not misspellings from conversation**
+ Generate a conversational response with proper document references:""")
+         ])
+
+         try:
+             logger.info("💬 RESPONSE GENERATION: Calling LLM for final response")
+             response = self.llm.invoke(response_prompt.format_messages())
+             logger.info(f"💬 RESPONSE GENERATION: LLM response received: {response.content[:100]}...")
+
+             # Post-process response to ensure no hallucination
+             final_response = self._validate_and_enhance_response(
+                 response.content.strip(),
+                 documents,
+                 query
+             )
+
+             return final_response
+         except Exception as e:
+             logger.error(f"❌ RESPONSE GENERATION: Error during generation: {e}")
+             logger.info("💬 RESPONSE GENERATION: Using RAG answer as fallback")
+             return rag_answer  # Fallback to RAG answer
+
+     def _build_conversation_context(self, messages: List[Any]) -> str:
+         """Build conversation history context for response generation."""
+         if not messages:
+             return "No previous conversation."
+
+         context_lines = []
+         # Show last 6 messages for context (to capture the current exchange)
+         for msg in messages[-6:]:
+             if isinstance(msg, HumanMessage):
+                 context_lines.append(f"User: {msg.content}")
+             elif isinstance(msg, AIMessage):
+                 context_lines.append(f"Assistant: {msg.content}")
+
+         return "\n".join(context_lines) if context_lines else "No previous conversation."
+
1111
+ def _build_document_details(self, documents: List[Any]) -> str:
1112
+ """Build detailed document information for response generation."""
1113
+ if not documents:
1114
+ return "No documents retrieved."
1115
+
1116
+ details = []
1117
+ for i, doc in enumerate(documents[:15], 1): # Show up to 15 documents
1118
+ metadata = getattr(doc, 'metadata', {}) if hasattr(doc, 'metadata') else (doc if isinstance(doc, dict) else {})
1119
+ content = getattr(doc, 'page_content', '') if hasattr(doc, 'page_content') else (doc.get('content', '') if isinstance(doc, dict) else '')
1120
+
1121
+ if isinstance(metadata, dict):
1122
+ filename = metadata.get('filename', 'Unknown')
1123
+ year = metadata.get('year', 'Unknown')
1124
+ district = metadata.get('district', 'Unknown')
1125
+ source = metadata.get('source', 'Unknown')
1126
+ page = metadata.get('page', metadata.get('page_label', 'Unknown'))
1127
+
1128
+ doc_info = f"[Doc {i}]"
1129
+ doc_info += f"\n Filename: {filename}"
1130
+ doc_info += f"\n Year: {year}"
1131
+ doc_info += f"\n District: {district}"
1132
+ doc_info += f"\n Source: {source}"
1133
+ if page != 'Unknown':
1134
+ doc_info += f"\n Page: {page}"
1135
+ doc_info += f"\n Content: {content[:300]}{'...' if len(content) > 300 else ''}"
1136
+ details.append(doc_info)
1137
+
1138
+ return "\n\n".join(details) if details else "No document details available."
1139
+
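The helpers above duck-type their inputs: they accept both object-style documents (with `.metadata` and `.page_content`, as LangChain `Document`s expose) and plain dicts. A minimal sketch of the two accepted shapes (the class and field values are illustrative stand-ins, not taken from the repo):

```python
# Both shapes below flow through _build_document_details unchanged.
class Doc:  # illustrative stand-in for a LangChain-style Document
    def __init__(self, page_content: str, metadata: dict):
        self.page_content = page_content
        self.metadata = metadata

docs = [
    Doc("Budget execution improved by 12% ...",
        {"filename": "oag_2022.pdf", "year": "2022",
         "district": "Kampala", "source": "OAG", "page": 14}),
    {"content": "Budget execution improved by 12% ...", "year": "2022"},  # dict fallback
]
```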
1140
+ def _extract_correct_names_from_documents(self, documents: List[Any]) -> str:
1141
+ """Extract correct district/source names from documents to correct misspellings."""
1142
+ districts = set()
1143
+ sources = set()
1144
+ years = set()
1145
+
1146
+ for doc in documents:
1147
+ metadata = getattr(doc, 'metadata', {}) if hasattr(doc, 'metadata') else (doc if isinstance(doc, dict) else {})
1148
+ if isinstance(metadata, dict):
1149
+ if metadata.get('district'):
1150
+ districts.add(str(metadata['district']))
1151
+ if metadata.get('source'):
1152
+ sources.add(str(metadata['source']))
1153
+ if metadata.get('year'):
1154
+ years.add(str(metadata['year']))
1155
+
1156
+ result = []
1157
+ if districts:
1158
+ result.append(f"Districts: {', '.join(sorted(districts))}")
1159
+ if sources:
1160
+ result.append(f"Sources: {', '.join(sorted(sources))}")
1161
+ if years:
1162
+ result.append(f"Years: {', '.join(sorted(years))}")
1163
+
1164
+ if result:
1165
+ return "\n".join(result) + "\n\nIMPORTANT: Use these EXACT spellings in your response, even if the conversation history has misspellings."
1166
+ return "No metadata available."
1167
+
1168
+ def _validate_and_enhance_response(self, response: str, documents: List[Any], query: str) -> str:
1169
+ """Validate response and ensure all claims are referenced."""
1170
+ # Extract years and districts from documents
1171
+ doc_years = set()
1172
+ doc_districts = set()
1173
+ doc_sources = set()
1174
+
1175
+ for doc in documents:
1176
+ metadata = getattr(doc, 'metadata', {}) if hasattr(doc, 'metadata') else (doc if isinstance(doc, dict) else {})
1177
+ if isinstance(metadata, dict):
1178
+ if metadata.get('year'):
1179
+ doc_years.add(str(metadata['year']))
1180
+ if metadata.get('district'):
1181
+ doc_districts.add(str(metadata['district']))
1182
+ if metadata.get('source'):
1183
+ doc_sources.add(str(metadata['source']))
1184
+
1185
+ # Misspelling correction using document metadata names (currently disabled):
1186
+ # response = self._correct_misspellings_in_response(response, doc_districts, doc_sources)
1187
+
1188
+ # Check if response mentions years not in documents
1189
+ year_pattern = r'\b(20\d{2})\b'
1190
+ mentioned_years = set(re.findall(year_pattern, response))
1191
+
1192
+ # Check if user query mentions a year
1193
+ query_years = set(re.findall(year_pattern, query))
1194
+
1195
+ # If user asks about a year not in documents, add a warning
1196
+ missing_years = query_years - doc_years
1197
+ if missing_years and doc_years:
1198
+ warning = f"\n\n⚠️ Note: The retrieved documents cover years {', '.join(sorted(doc_years))}, but I don't have information for {', '.join(sorted(missing_years))} in the retrieved documents."
1199
+ if warning not in response:
1200
+ response = response + warning
1201
+
1202
+ # Check if response has document references
1203
+ doc_ref_pattern = r'\[Doc\s+\d+\]'
1204
+ has_refs = bool(re.search(doc_ref_pattern, response))
1205
+
1206
+ # If response has factual claims but no references, add a note
1207
+ if not has_refs and len(documents) > 0:
1208
+ # Check if response has numbers or specific claims (simple heuristic)
1209
+ has_numbers = bool(re.search(r'\d+', response))
1210
+ if has_numbers and len(response) > 50:
1211
+ logger.warning("⚠️ Response contains factual claims but no document references")
1212
+ # Don't modify response, but log the issue
1213
+
1214
+ return response
1215
+
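To make the year guard concrete, here is a small worked example of the regex and set arithmetic used in `_validate_and_enhance_response` (the year values are illustrative):

```python
import re

year_pattern = r'\b(20\d{2})\b'

doc_years = {"2021", "2022", "2023"}                 # from document metadata
query = "Compare budget allocations for 2020 and 2022"
query_years = set(re.findall(year_pattern, query))   # {"2020", "2022"}

missing_years = query_years - doc_years              # {"2020"}
# The validator appends a "Note: ..." warning for 2020 rather than letting
# the model fabricate figures for a year the documents do not cover.
```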
1216
+ def _generate_conversational_response_without_docs(self, query: str, messages: List[Any]) -> str:
1217
+ """Generate conversational response using only LLM knowledge and conversation history"""
1218
+ logger.info("πŸ’¬ RESPONSE GENERATION (NO DOCS): Starting response generation without documents")
1219
+ logger.info(f"πŸ’¬ RESPONSE GENERATION (NO DOCS): Query: '{query[:50]}...'")
1220
+
1221
+ # Build conversation context
1222
+ conversation_context = ""
1223
+ for msg in messages[-6:]: # Last 6 messages for context
1224
+ if isinstance(msg, HumanMessage):
1225
+ conversation_context += f"User: {msg.content}\n"
1226
+ elif isinstance(msg, AIMessage):
1227
+ conversation_context += f"Assistant: {msg.content}\n"
1228
+
1229
+ # Create response prompt
1230
+ logger.info(f"πŸ’¬ RESPONSE GENERATION (NO DOCS): Building response prompt")
1231
+ response_prompt = ChatPromptTemplate.from_messages([
1232
+ SystemMessage(content="""You are a helpful audit report assistant. Generate a natural, conversational response.
1233
+ RULES:
1234
+ 1. Answer the user's question directly and clearly based on your knowledge
1235
+ 2. Use conversation history for context
1236
+ 3. Be conversational, not technical
1237
+ 4. Acknowledge if the answer is based on general knowledge rather than specific documents
1238
+ 5. Stay professional but friendly
1239
+ TONE: Professional but friendly, like talking to a colleague."""),
1240
+ HumanMessage(content=f"""Current Question: {query}
1241
+ Conversation History:
1242
+ {conversation_context}
1243
+ Generate a conversational response based on your knowledge:""")
1244
+ ])
1245
+
1246
+ try:
1247
+ logger.info(f"πŸ’¬ RESPONSE GENERATION (NO DOCS): Calling LLM")
1248
+ response = self.llm.invoke(response_prompt.format_messages())
1249
+ logger.info(f"πŸ’¬ RESPONSE GENERATION (NO DOCS): LLM response received: {response.content[:100]}...")
1250
+ return response.content.strip()
1251
+ except Exception as e:
1252
+ logger.error(f"❌ RESPONSE GENERATION (NO DOCS): Error during generation: {e}")
1253
+ return "I apologize, but I encountered an error. Please try asking your question differently."
1254
+
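One subtlety worth noting: both response prompts are built from already-formatted SystemMessage/HumanMessage objects, so `format_messages()` takes no variables and the messages pass through as constants. A minimal sketch of that behavior, assuming LangChain's prompt API as used above:

```python
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_messages([
    SystemMessage(content="You are a helpful audit report assistant."),
    HumanMessage(content="Current Question: what changed in 2022?"),
])

# No template variables to fill in: the constant messages are returned as-is.
messages = prompt.format_messages()
```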
1255
+ def chat(self, user_input: str, conversation_id: str = "default") -> Dict[str, Any]:
1256
+ """Main chat interface"""
1257
+ logger.info(f"πŸ’¬ MULTI-AGENT CHAT: Processing '{user_input[:50]}...'")
1258
+
1259
+ # Load conversation
1260
+ logger.info(f"πŸ’¬ MULTI-AGENT CHAT: Loading conversation {conversation_id}")
1261
+ conversation_file = self.conversations_dir / f"{conversation_id}.json"
1262
+ conversation = self._load_conversation(conversation_file)
1263
+ logger.info(f"πŸ’¬ MULTI-AGENT CHAT: Loaded {len(conversation['messages'])} previous messages")
1264
+
1265
+ # Add user message
1266
+ conversation["messages"].append(HumanMessage(content=user_input))
1267
+ logger.info(f"πŸ’¬ MULTI-AGENT CHAT: Added user message to conversation")
1268
+
1269
+ # Prepare state
1270
+ logger.info(f"πŸ’¬ MULTI-AGENT CHAT: Preparing state for graph execution")
1271
+ state = MultiAgentState(
1272
+ conversation_id=conversation_id,
1273
+ messages=conversation["messages"],
1274
+ current_query=user_input,
1275
+ query_context=None,
1276
+ rag_query=None,
1277
+ rag_filters=None,
1278
+ retrieved_documents=None,
1279
+ final_response=None,
1280
+ agent_logs=[],
1281
+ conversation_context=conversation.get("context", {}),
1282
+ session_start_time=conversation["session_start_time"],
1283
+ last_ai_message_time=conversation["last_ai_message_time"]
1284
+ )
1285
+
1286
+ # Run multi-agent graph
1287
+ logger.info(f"πŸ’¬ MULTI-AGENT CHAT: Executing multi-agent graph")
1288
+ final_state = self.graph.invoke(state)
1289
+ logger.info(f"πŸ’¬ MULTI-AGENT CHAT: Graph execution completed")
1290
+
1291
+ # Add AI response to conversation
1292
+ if final_state["final_response"]:
1293
+ conversation["messages"].append(AIMessage(content=final_state["final_response"]))
1294
+ logger.info(f"πŸ’¬ MULTI-AGENT CHAT: Added AI response to conversation")
1295
+
1296
+ # Update conversation
1297
+ conversation["last_ai_message_time"] = final_state["last_ai_message_time"]
1298
+ conversation["context"] = final_state["conversation_context"]
1299
+
1300
+ # Save conversation
1301
+ logger.info(f"πŸ’¬ MULTI-AGENT CHAT: Saving conversation")
1302
+ self._save_conversation(conversation_file, conversation)
1303
+
1304
+ logger.info("βœ… MULTI-AGENT CHAT: Completed")
1305
+
1306
+ # Return response and RAG results
1307
+ return {
1308
+ 'response': final_state["final_response"],
1309
+ 'rag_result': {
1310
+ 'sources': final_state["retrieved_documents"] or [],
1311
+ 'answer': final_state["final_response"]
1312
+ },
1313
+ 'agent_logs': final_state["agent_logs"],
1314
+ 'actual_rag_query': final_state.get("rag_query", "")
1315
+ }
1316
+
1317
+ def _load_conversation(self, conversation_file: Path) -> Dict[str, Any]:
1318
+ """Load conversation from file"""
1319
+ if conversation_file.exists():
1320
+ try:
1321
+ with open(conversation_file) as f:
1322
+ data = json.load(f)
1323
+ # Convert message dicts back to LangChain messages
1324
+ messages = []
1325
+ for msg_data in data.get("messages", []):
1326
+ if msg_data["type"] == "human":
1327
+ messages.append(HumanMessage(content=msg_data["content"]))
1328
+ elif msg_data["type"] == "ai":
1329
+ messages.append(AIMessage(content=msg_data["content"]))
1330
+ data["messages"] = messages
1331
+ return data
1332
+ except Exception as e:
1333
+ logger.warning(f"Could not load conversation: {e}")
1334
+
1335
+ # Return default conversation
1336
+ return {
1337
+ "messages": [],
1338
+ "session_start_time": time.time(),
1339
+ "last_ai_message_time": time.time(),
1340
+ "context": {}
1341
+ }
1342
+
1343
+ def _save_conversation(self, conversation_file: Path, conversation: Dict[str, Any]):
1344
+ """Save conversation to file"""
1345
+ try:
1346
+ # Ensure the conversations directory exists with proper permissions
1347
+ conversation_file.parent.mkdir(parents=True, mode=0o777, exist_ok=True)
1348
+
1349
+ # Convert messages to serializable format
1350
+ messages_data = []
1351
+ for msg in conversation["messages"]:
1352
+ if isinstance(msg, HumanMessage):
1353
+ messages_data.append({"type": "human", "content": msg.content})
1354
+ elif isinstance(msg, AIMessage):
1355
+ messages_data.append({"type": "ai", "content": msg.content})
1356
+
1357
+ conversation_data = {
1358
+ "messages": messages_data,
1359
+ "session_start_time": conversation["session_start_time"],
1360
+ "last_ai_message_time": conversation["last_ai_message_time"],
1361
+ "context": conversation.get("context", {})
1362
+ }
1363
+
1364
+ with open(conversation_file, 'w') as f:
1365
+ json.dump(conversation_data, f, indent=2)
1366
+
1367
+ except Exception as e:
1368
+ logger.error(f"Could not save conversation: {e}")
1369
+ logger.error(f"Traceback: {traceback.format_exc()}")
1370
+
1371
+
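For reference, the file written by `_save_conversation` to `<conversations_dir>/<conversation_id>.json` has the following shape (a sketch; timestamps and message text are illustrative):

```python
# Illustrative contents of a persisted conversation file:
conversation_data = {
    "messages": [
        {"type": "human", "content": "List me top 10 challenges ..."},
        {"type": "ai", "content": "Based on the audit reports [Doc 1], ..."},
    ],
    "session_start_time": 1717000000.0,   # time.time() when the session began
    "last_ai_message_time": 1717000042.5,
    "context": {},
}
```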
1372
+ def get_multi_agent_chatbot():
1373
+ """Get multi-agent chatbot instance"""
1374
+ return MultiAgentRAGChatbot()
1375
+
1376
+ if __name__ == "__main__":
1377
+ # Test the multi-agent system
1378
+ chatbot = MultiAgentRAGChatbot()
1379
+
1380
+ # Test conversation
1381
+ result = chatbot.chat("List me top 10 challenges in budget allocation for the last 3 years")
1382
+ print("Response:", result['response'])
1383
+ print("Agent Logs:", result['agent_logs'])
src/agents/visual_multi_agent_chatbot.py CHANGED
@@ -809,8 +809,8 @@ Generate a helpful response:""")
 if missing_years:
 warnings.append(
 f"You requested data for years {', '.join(sorted(requested_years))}, "
- f"but the retrieved documents only cover {', '.join(sorted(doc_years))}. "
- f"Data for {', '.join(sorted(missing_years))} may not be available."
+ f"but the retrieved documents are missing {', '.join(sorted(missing_years))} "
+ f"(may not be available in the database)."
 )
 
 # Compare requested vs retrieved DISTRICTS
@@ -824,8 +824,8 @@ Generate a helpful response:""")
 missing_districts = [d for d in requested_districts if d.lower() in missing_lower]
 warnings.append(
 f"You requested data for districts {', '.join(sorted(requested_districts))}, "
- f"but the retrieved documents only cover {', '.join(sorted(doc_districts))}. "
- f"Data for {', '.join(sorted(missing_districts))} may not be available."
+ f"but the retrieved documents are missing {', '.join(sorted(missing_districts))} "
+ f"(may not be available in the database)."
 )
 
 # Add warnings to response if any
@@ -861,24 +861,28 @@ def get_visual_multi_agent_chatbot() -> VisualMultiAgentChatbot:
 os.environ.get("QDRANT_API_KEY")
 )
 
+ # Get collection name from env var (default to colSmol-500M-v2 for new processing)
+ collection_name = os.environ.get("QDRANT_COLLECTION_VISUAL", "colSmol-500M-v2")
+
 if not qdrant_url or not qdrant_api_key:
 raise ValueError(
 "Visual mode requires Qdrant credentials for the ColPali cluster.\n"
 "Please set one of these in your .env file:\n"
 " - QDRANT_URL_AKRYL and QDRANT_API_KEY_AKRYL\n"
 " - DEST_QDRANT_URL and DEST_QDRANT_API_KEY\n"
- " - QDRANT_URL and QDRANT_API_KEY"
+ " - QDRANT_URL and QDRANT_API_KEY\n"
+ "And optionally set QDRANT_COLLECTION_VISUAL (default: colSmol-500M-v2)"
 )
 
 logger.info(f" Using Qdrant URL: {qdrant_url}")
- logger.info(f" Collection: colSmol-500M")
+ logger.info(f" Collection: {collection_name}")
 logger.info(f" Multi-modal: {MULTIMODAL_ENABLED} (model: {MULTIMODAL_MODEL}, max_images: {MULTIMODAL_MAX_IMAGES})")
 
 # Create visual search adapter
 visual_search = VisualSearchAdapter(
 qdrant_url=qdrant_url,
 qdrant_api_key=qdrant_api_key,
- collection_name="colSmol-500M"
+ collection_name=collection_name
 )
 
 # Create multi-agent chatbot with multi-modal enabled
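Taken together, the configuration the patched factory reads can be exercised as follows (a sketch; the import path is hypothetical and the env values are placeholders):

```python
import os

# Any one URL/key pair satisfies the credential check:
# QDRANT_URL_AKRYL + QDRANT_API_KEY_AKRYL, DEST_QDRANT_URL + DEST_QDRANT_API_KEY,
# or QDRANT_URL + QDRANT_API_KEY. The collection override is optional.
os.environ.setdefault("QDRANT_COLLECTION_VISUAL", "colSmol-500M-v2")

# Hypothetical import path, mirroring src/agents/visual_multi_agent_chatbot.py:
# from src.agents.visual_multi_agent_chatbot import get_visual_multi_agent_chatbot
# chatbot = get_visual_multi_agent_chatbot()
```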