Zeggai Abdellah committed on
Commit
e40cfb6
·
1 Parent(s): b12f17b

add fallback system

Browse files
Files changed (1) hide show
  1. rag_pipeline.py +243 -69
rag_pipeline.py CHANGED
@@ -1,7 +1,8 @@
1
  # -*- coding: utf-8 -*-
2
  """
3
- Enhanced RAG Pipeline for vaccine assistant
4
  Handles agent creation and question answering with sequential citation numbering
 
5
  """
6
 
7
  import json
@@ -106,13 +107,60 @@ def convert_citations_to_sequential(response_text, source_id_to_number_map):
106
  return sequential_response
107
 
108
 
109
-
110
- def create_safe_custom_prompt(tools, llm):
111
  """Create a safe version that won't have formatting conflicts"""
112
 
113
- print(f"[LOG] Creating custom prompt with {len(tools)} tools")
114
 
115
- custom_instructions = """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
116
  ## MEDICAL ASSISTANT ROLE
117
  You are a helpful and knowledgeable AI-powered vaccine assistant designed to support doctors in clinical decision-making.
118
  You provide evidence-based guidance using only information from official vaccine medical documents.
@@ -176,43 +224,70 @@ If you cannot find complete information to fully answer a question:
176
  template_vars=original_prompt.template_vars,
177
  metadata=original_prompt.metadata if hasattr(original_prompt, 'metadata') else None
178
  )
179
- print("[LOG] βœ… Successfully created safe custom prompt")
180
  return new_prompt
181
  except:
182
  # Even safer fallback
183
- print("[LOG] ⚠️ Using fallback prompt template")
184
  return PromptTemplate(template=safe_template)
185
 
186
- def create_agent(tools, llm):
 
187
  """Create the ReAct agent with custom prompt"""
188
 
189
- print(f"[LOG] Creating ReAct agent with {len(tools)} tools and max_iterations=8")
 
190
 
191
- # Create agent with increased max iterations and better handling
192
- # Force verbose=True to see the Thought/Action/Observation cycle
 
193
  agent = ReActAgent.from_tools(
194
  tools,
195
  llm=llm,
196
- verbose=True, # This should show the ReAct reasoning steps
197
- max_iterations=8, # Reduced from default to prevent excessive looping
198
  )
199
 
200
- # Create and apply safe custom prompt
201
  try:
202
- safe_custom_prompt = create_safe_custom_prompt(tools, llm)
203
  agent.update_prompts({"agent_worker:system_prompt": safe_custom_prompt})
204
- print("βœ… Successfully updated with safe custom prompt")
205
  except Exception as e:
206
- print(f"❌ Safe prompt update failed: {e}")
207
- print("⚠️ Using original agent without modifications")
208
 
209
- print("[LOG] Agent creation completed")
210
  return agent
211
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
212
  def initialize_rag_pipeline(tools):
213
- """Initialize the RAG pipeline with tools"""
214
 
215
- print("[LOG] Initializing RAG pipeline...")
216
  print(f"[LOG] Available tools: {[tool.metadata.name if hasattr(tool, 'metadata') else str(tool) for tool in tools]}")
217
 
218
  # Initialize LlamaIndex LLM
@@ -222,38 +297,146 @@ def initialize_rag_pipeline(tools):
222
  api_key=os.getenv('GOOGLE_API_KEY'),
223
  )
224
 
225
- # Create agent
226
- agent = create_agent(tools, llama_index_llm)
 
227
 
228
- print("[LOG] βœ… RAG pipeline initialization completed")
229
- return agent
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
230
 
231
- def process_question(agent, question: str) -> str:
232
- """Process a question through the RAG pipeline"""
233
  print(f"[LOG] Processing question: '{question[:100]}{'...' if len(question) > 100 else ''}'")
 
 
 
 
234
  print("="*50)
235
- print("AGENT REASONING PROCESS:")
236
  print("="*50)
237
  start_time = time.time()
238
 
239
  try:
240
- # The agent.chat() call should now show the full ReAct process
241
- response = agent.chat(question)
 
242
 
243
  print("="*50)
244
- print("END OF AGENT REASONING")
245
  print("="*50)
246
 
247
  elapsed_time = time.time() - start_time
248
- print(f"[LOG] βœ… Agent response received in {elapsed_time:.2f} seconds")
249
- print(f"[LOG] Response length: {len(response.response)} characters")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
250
 
251
- return response.response
252
  except Exception as e:
253
  elapsed_time = time.time() - start_time
254
- print(f"[LOG] ❌ Error processing question after {elapsed_time:.2f} seconds: {e}")
 
 
 
 
 
 
 
 
 
 
255
  return f"Error processing your question: {str(e)}"
256
 
 
257
  def aswer_language_detection(response_text: str) -> str:
258
  """
259
  Detect the language of the response text.
@@ -283,12 +466,12 @@ def aswer_language_detection(response_text: str) -> str:
283
  return answer_language
284
 
285
 
286
- def process_question_with_sequential_citations(agent, question: str, chunks_directory="./data/") -> dict:
287
  """
288
- Process a question through the RAG pipeline and return response with sequential citation numbers.
289
 
290
  Args:
291
- agent: The initialized RAG agent
292
  question (str): The user's question
293
  chunks_directory (str): Path to the directory containing JSON files
294
 
@@ -297,43 +480,30 @@ def process_question_with_sequential_citations(agent, question: str, chunks_dire
297
  "response": str, # Response with sequential citation numbers [1], [2], etc.
298
  "cited_elements_json": str, # JSON array of cited elements in order
299
  "unique_ids": list, # Original source IDs in order
300
- "citation_mapping": dict # Mapping from source ID to citation number
 
301
  }
302
  """
303
- print(f"\n[LOG] === STARTING QUESTION PROCESSING ===")
304
  print(f"[LOG] Question: '{question[:150]}{'...' if len(question) > 150 else ''}'")
305
  print(f"[LOG] Chunks directory: {chunks_directory}")
306
  start_time = time.time()
307
 
 
 
308
  try:
309
- # Get the response from the agent
310
- print("\n" + "="*60)
311
- print("πŸ€– AGENT REASONING PROCESS STARTING...")
312
- print("="*60)
313
 
314
- response = agent.chat(question)
315
-
316
- print("="*60)
317
- print("πŸ€– AGENT REASONING PROCESS COMPLETED")
318
- print("="*60)
319
- response_text = response.response
320
 
321
  agent_time = time.time() - start_time
322
  print(f"[LOG] Agent processing completed in {agent_time:.2f} seconds")
323
  print(f"[LOG] Raw response length: {len(response_text)} characters")
324
 
325
- # Enhanced handling for max iterations error
326
- if ("max iterations" in response_text.lower() or
327
- "reached max iterations" in response_text.lower() or
328
- len(response_text.strip()) == 0 or
329
- "agent stopped due to max iterations" in response_text.lower()):
330
-
331
- print("[LOG] ⚠️ Detected max iterations error, providing fallback response")
332
- # Provide a more helpful fallback response
333
- response_text = ("I apologize, but I encountered difficulties processing your question within the available search iterations. "
334
- "This may be due to the complexity of your query or limitations in finding specific information in the available documents. "
335
- "Please try rephrasing your question more specifically, or break it down into smaller, more focused questions for better results.")
336
-
337
  # Extract source IDs from the response (preserving order)
338
  unique_ids = extract_source_ids(response_text)
339
 
@@ -392,11 +562,12 @@ def process_question_with_sequential_citations(agent, question: str, chunks_dire
392
 
393
  # Convert to JSON
394
  cited_elements_json = json.dumps(cited_elements_ordered, ensure_ascii=False, indent=2)
395
- aswer_language = aswer_language_detection(response_text)
396
 
397
  total_time = time.time() - start_time
398
  print(f"[LOG] βœ… Processing completed in {total_time:.2f} seconds total")
399
  print(f"[LOG] Final response length: {len(sequential_response)} characters")
 
400
  print(f"[LOG] === QUESTION PROCESSING COMPLETED ===\n")
401
 
402
  return {
@@ -404,7 +575,8 @@ def process_question_with_sequential_citations(agent, question: str, chunks_dire
404
  "cited_elements_json": cited_elements_json,
405
  "unique_ids": unique_ids,
406
  "citation_mapping": source_id_to_number,
407
- "answer_language": aswer_language
 
408
  }
409
 
410
  except Exception as e:
@@ -417,13 +589,15 @@ def process_question_with_sequential_citations(agent, question: str, chunks_dire
417
  "cited_elements_json": "[]",
418
  "unique_ids": [],
419
  "citation_mapping": {},
420
- "answer_language": "en" # Default to English if not specified
 
421
  }
422
 
423
- def process_question_with_citations(agent, question: str, chunks_directory="./data/") -> dict:
 
424
  """
425
  Legacy function - maintained for backward compatibility.
426
- Now calls the new sequential citation function.
427
  """
428
- print("[LOG] Using legacy function wrapper - redirecting to sequential citations")
429
- return process_question_with_sequential_citations(agent, question, chunks_directory)
 
1
  # -*- coding: utf-8 -*-
2
  """
3
+ Enhanced RAG Pipeline for vaccine assistant with fallback system
4
  Handles agent creation and question answering with sequential citation numbering
5
+ Includes fallback agent for max iterations handling
6
  """
7
 
8
  import json
 
107
  return sequential_response
108
 
109
 
110
+ def create_safe_custom_prompt(tools, llm, is_fallback=False):
 
111
  """Create a safe version that won't have formatting conflicts"""
112
 
113
+ print(f"[LOG] Creating {'fallback' if is_fallback else 'standard'} custom prompt with {len(tools)} tools")
114
 
115
+ if is_fallback:
116
+ custom_instructions = """
117
+ ## MEDICAL ASSISTANT ROLE - FALLBACK MODE
118
+ You are a helpful and knowledgeable AI-powered vaccine assistant designed to support doctors in clinical decision-making.
119
+ You are operating in FALLBACK MODE with access to only the most essential and comprehensive tools.
120
+ You provide evidence-based guidance using only information from official vaccine medical documents.
121
+ Answer the doctor's question accurately and concisely using only the provided information.
122
+
123
+ ## FALLBACK MODE INSTRUCTIONS
124
+ - You have access to only 2 powerful tools that search the entire main documents
125
+ - Use Guide_vector_tool for questions about the Algerian National Vaccination Guide
126
+ - Use Immunization_in_Practice_tool for questions requiring WHO global guidance
127
+ - Be direct and efficient - search once with each tool if needed, then provide your answer
128
+ - Do not overthink or search repeatedly - these tools are comprehensive
129
+
130
+ ## IMPORTANT REQUIREMENTS
131
+
132
+ ### Citation and Sourcing
133
+ 1. For each fact in your response, include an inline citation in the format [Source] immediately following the information, e.g., [e795ebd28318886c0b1a5395ac30ad90].
134
+ 2. Do NOT use 'Source:' in the citation format; use only the Source in square brackets.
135
+ 3. If a fact is supported by multiple sources, use adjacent citations: [e795ebd28318886c0b1a5395ac30ad90][21a932b2340bb16707763f57f0ad2]
136
+ 4. Use ONLY the provided information and never include facts from your general knowledge.
137
+
138
+ ### Content Formatting
139
+ 1. When rendering tables:
140
+ - Convert HTML tables into clean Markdown format
141
+ - Preserve all original headers and data rows exactly
142
+ - Include the citation in the table caption, e.g., 'Table: Vaccination Schedule [Source]'
143
+ 2. For lists, maintain the original bullet points/numbering and include citations.
144
+ 3. Present information concisely but ensure clinical accuracy is never compromised.
145
+
146
+ ### CRITICAL: Efficient Fallback Strategy
147
+ 1. **SEARCH ONCE**: Use each tool at most once - they are comprehensive and powerful
148
+ 2. **BE DECISIVE**: Once you find relevant information, formulate your response immediately
149
+ 3. **ANSWER DIRECTLY**: Provide a clear, direct answer based on the information found
150
+ 4. **STOP WHEN SUFFICIENT**: If you have found adequate information, provide the response and stop
151
+ 5. **COMPREHENSIVE COVERAGE**: These tools search entire documents, so one search should be sufficient
152
+
153
+ ### Response Guidelines
154
+ - Start with the most relevant tool for the question
155
+ - If the question requires both Algerian-specific and global context, use both tools once each
156
+ - Provide whatever information you find with proper citations
157
+ - If information is limited, clearly state what is and isn't available in the documents
158
+
159
+ ---
160
+
161
+ """
162
+ else:
163
+ custom_instructions = """
164
  ## MEDICAL ASSISTANT ROLE
165
  You are a helpful and knowledgeable AI-powered vaccine assistant designed to support doctors in clinical decision-making.
166
  You provide evidence-based guidance using only information from official vaccine medical documents.
 
224
  template_vars=original_prompt.template_vars,
225
  metadata=original_prompt.metadata if hasattr(original_prompt, 'metadata') else None
226
  )
227
+ print(f"[LOG] βœ… Successfully created {'fallback' if is_fallback else 'standard'} custom prompt")
228
  return new_prompt
229
  except:
230
  # Even safer fallback
231
+ print(f"[LOG] ⚠️ Using fallback prompt template for {'fallback' if is_fallback else 'standard'} agent")
232
  return PromptTemplate(template=safe_template)
233
 
234
+
235
def create_agent(tools, llm, is_fallback=False):
    """Build a ReAct agent over the given tools.

    Args:
        tools: Query-engine tools the agent may call.
        llm: The LLM instance driving the agent.
        is_fallback: When True, build the constrained fallback agent
            (tighter iteration cap, fallback system prompt).

    Returns:
        The configured ReActAgent.
    """
    mode = "FALLBACK" if is_fallback else "STANDARD"
    iteration_cap = 3 if is_fallback else 8

    print(f"[LOG] Creating {mode} ReAct agent with {len(tools)} tools and max_iterations={iteration_cap}")

    # verbose=True surfaces the Thought/Action/Observation trace in the logs.
    agent = ReActAgent.from_tools(
        tools, llm=llm, verbose=True, max_iterations=iteration_cap
    )

    # Swap in the mode-appropriate system prompt; keep the stock agent on failure.
    try:
        prompt = create_safe_custom_prompt(tools, llm, is_fallback=is_fallback)
        agent.update_prompts({"agent_worker:system_prompt": prompt})
        print(f"✅ Successfully updated {mode} agent with custom prompt")
    except Exception as e:
        print(f"❌ {mode} agent prompt update failed: {e}")
        print(f"⚠️ Using original {mode} agent without modifications")

    print(f"[LOG] {mode} agent creation completed")
    return agent
262
 
263
+
264
def create_fallback_tools(all_tools, fallback_tool_names=("Guide_vector_tool", "Immunization_in_Practice_tool")):
    """Select the comprehensive document-wide tools used by the fallback agent.

    Args:
        all_tools: Full tool list handed to the standard agent.
        fallback_tool_names: Names of the tools to keep for fallback mode.
            Defaults to the guide and WHO immunization search tools, so
            existing callers keep the original behavior.

    Returns:
        list | None: Matching tools in their original order, or None when
        nothing matched (the caller then skips fallback-agent creation).
    """
    print("[LOG] Creating fallback tools (guide + immunization only)")

    # Set membership: O(1) per lookup and generalizes to any name list.
    wanted = set(fallback_tool_names)

    fallback_tools = []
    tool_names_found = []
    for tool in all_tools:
        # Tools without metadata fall back to their repr for name matching.
        tool_name = tool.metadata.name if hasattr(tool, 'metadata') else str(tool)
        if tool_name in wanted:
            fallback_tools.append(tool)
            tool_names_found.append(tool_name)

    print(f"[LOG] Found {len(fallback_tools)} fallback tools: {tool_names_found}")

    if not fallback_tools:
        print("[LOG] ❌ ERROR: No fallback tools found! Check tool names.")
        return None

    return fallback_tools
285
+
286
+
287
  def initialize_rag_pipeline(tools):
288
+ """Initialize the RAG pipeline with both standard and fallback agents"""
289
 
290
+ print("[LOG] Initializing RAG pipeline with fallback system...")
291
  print(f"[LOG] Available tools: {[tool.metadata.name if hasattr(tool, 'metadata') else str(tool) for tool in tools]}")
292
 
293
  # Initialize LlamaIndex LLM
 
297
  api_key=os.getenv('GOOGLE_API_KEY'),
298
  )
299
 
300
+ # Create standard agent
301
+ print("[LOG] Creating standard agent...")
302
+ standard_agent = create_agent(tools, llama_index_llm, is_fallback=False)
303
 
304
+ # Create fallback tools and agent
305
+ print("[LOG] Creating fallback agent...")
306
+ fallback_tools = create_fallback_tools(tools)
307
+
308
+ if fallback_tools is None:
309
+ print("[LOG] ❌ WARNING: Fallback agent creation failed - no fallback tools available")
310
+ fallback_agent = None
311
+ else:
312
+ fallback_agent = create_agent(fallback_tools, llama_index_llm, is_fallback=True)
313
+ print("[LOG] βœ… Fallback agent created successfully")
314
+
315
+ print("[LOG] βœ… RAG pipeline initialization completed with fallback system")
316
+
317
+ return {
318
+ "standard_agent": standard_agent,
319
+ "fallback_agent": fallback_agent,
320
+ "llm": llama_index_llm
321
+ }
322
+
323
+
324
def detect_max_iterations_error(response_text):
    """Return True when the response looks like a failed agent run.

    A run is treated as failed when the text contains a known
    max-iterations phrase, is empty/near-empty, or reads like a generic
    "error ... processing" message. Used by process_question to decide
    whether to retry with the fallback agent.

    Args:
        response_text (str | None): Raw agent response text.

    Returns:
        bool: True if the response should trigger the fallback agent.
    """
    # Guard: None or empty output is a failure by definition (the original
    # crashed with AttributeError on None input).
    if not response_text:
        return True

    max_iteration_indicators = (
        "max iterations",
        "reached max iterations",
        "agent stopped due to max iterations",
        "maximum number of iterations",
        "iteration limit",
    )

    response_lower = response_text.lower()

    # Known max-iteration phrasings emitted by the agent framework.
    if any(indicator in response_lower for indicator in max_iteration_indicators):
        return True

    # Very short responses almost always indicate a failed run.
    if len(response_text.strip()) < 10:
        return True

    # Generic error wording (e.g. "Error processing your question: ...").
    if "error" in response_lower and "processing" in response_lower:
        return True

    return False
351
+
352
 
353
def process_question(agents_dict, question: str) -> str:
    """Answer a question, retrying with the fallback agent when needed.

    Tries the standard agent first. If its answer looks like a failed run
    (per detect_max_iterations_error) or it raises, retries once with the
    constrained fallback agent before giving up with an apology message.

    Args:
        agents_dict: Dict with "standard_agent" and "fallback_agent" keys;
            "fallback_agent" may be None when no fallback tools were found.
        question: The user's question.

    Returns:
        str: The agent's answer text, or a user-facing error/apology message.
    """
    print(f"[LOG] Processing question: '{question[:100]}{'...' if len(question) > 100 else ''}'")

    standard_agent = agents_dict["standard_agent"]
    fallback_agent = agents_dict["fallback_agent"]

    print("="*50)
    print("🤖 STANDARD AGENT REASONING PROCESS:")
    print("="*50)
    start_time = time.time()

    try:
        # Try standard agent first
        response = standard_agent.chat(question)
        response_text = response.response

        print("="*50)
        print("🤖 STANDARD AGENT REASONING COMPLETED")
        print("="*50)

        elapsed_time = time.time() - start_time
        print(f"[LOG] ✅ Standard agent response received in {elapsed_time:.2f} seconds")
        print(f"[LOG] Response length: {len(response_text)} characters")

        # Check if we need to use fallback
        if detect_max_iterations_error(response_text):
            print("[LOG] 🔄 Max iterations detected, switching to FALLBACK AGENT...")

            if fallback_agent is None:
                print("[LOG] ❌ Fallback agent not available, returning error message")
                return ("I apologize, but I encountered difficulties processing your question. "
                        "Please try rephrasing your question more specifically or breaking it down into smaller parts.")

            print("="*50)
            print("🛡️ FALLBACK AGENT REASONING PROCESS:")
            print("="*50)

            fallback_start_time = time.time()

            try:
                fallback_response = fallback_agent.chat(question)
                fallback_text = fallback_response.response

                print("="*50)
                print("🛡️ FALLBACK AGENT REASONING COMPLETED")
                print("="*50)

                fallback_elapsed = time.time() - fallback_start_time
                total_elapsed = time.time() - start_time

                print(f"[LOG] ✅ Fallback agent response received in {fallback_elapsed:.2f} seconds")
                print(f"[LOG] Total processing time: {total_elapsed:.2f} seconds")
                print(f"[LOG] Fallback response length: {len(fallback_text)} characters")

                # Check if fallback also failed
                if detect_max_iterations_error(fallback_text):
                    print("[LOG] ❌ Fallback agent also hit max iterations")
                    return ("I apologize, but I'm having difficulty finding specific information about your question in the available documents. "
                            "Please try asking a more specific question or rephrasing your query.")

                return fallback_text

            except Exception as e:
                fallback_elapsed = time.time() - fallback_start_time
                print(f"[LOG] ❌ Fallback agent error after {fallback_elapsed:.2f} seconds: {e}")
                return ("I apologize, but I encountered an error while processing your question. "
                        "Please try rephrasing your question or asking about a more specific topic.")

        return response_text

    except Exception as e:
        elapsed_time = time.time() - start_time
        print(f"[LOG] ❌ Standard agent error after {elapsed_time:.2f} seconds: {e}")

        # Try fallback even on standard agent exception
        if fallback_agent is not None:
            print("[LOG] 🔄 Standard agent failed, trying FALLBACK AGENT...")
            try:
                fallback_response = fallback_agent.chat(question)
                fallback_text = fallback_response.response
                # Fix: apply the same failure check used on the primary path, so a
                # raw "max iterations" message is never handed back to the caller.
                if not detect_max_iterations_error(fallback_text):
                    return fallback_text
                print("[LOG] ❌ Fallback agent also hit max iterations")
            except Exception as fallback_e:
                print(f"[LOG] ❌ Fallback agent also failed: {fallback_e}")

        return f"Error processing your question: {str(e)}"
438
 
439
+
440
  def aswer_language_detection(response_text: str) -> str:
441
  """
442
  Detect the language of the response text.
 
466
  return answer_language
467
 
468
 
469
+ def process_question_with_sequential_citations(agents_dict, question: str, chunks_directory="./data/") -> dict:
470
  """
471
+ Process a question through the RAG pipeline with fallback support and return response with sequential citation numbers.
472
 
473
  Args:
474
+ agents_dict: Dictionary containing standard_agent, fallback_agent, and llm
475
  question (str): The user's question
476
  chunks_directory (str): Path to the directory containing JSON files
477
 
 
480
  "response": str, # Response with sequential citation numbers [1], [2], etc.
481
  "cited_elements_json": str, # JSON array of cited elements in order
482
  "unique_ids": list, # Original source IDs in order
483
+ "citation_mapping": dict, # Mapping from source ID to citation number
484
+ "used_fallback": bool # Whether fallback agent was used
485
  }
486
  """
487
+ print(f"\n[LOG] === STARTING QUESTION PROCESSING WITH FALLBACK SUPPORT ===")
488
  print(f"[LOG] Question: '{question[:150]}{'...' if len(question) > 150 else ''}'")
489
  print(f"[LOG] Chunks directory: {chunks_directory}")
490
  start_time = time.time()
491
 
492
+ used_fallback = False
493
+
494
  try:
495
+ # Get the response using the enhanced process_question function
496
+ response_text = process_question(agents_dict, question)
 
 
497
 
498
+ # Check if this looks like a fallback was used (simple heuristic)
499
+ if "fallback" in response_text.lower() or len(response_text) < 50:
500
+ used_fallback = True
501
+ print("[LOG] πŸ›‘οΈ Fallback agent was likely used")
 
 
502
 
503
  agent_time = time.time() - start_time
504
  print(f"[LOG] Agent processing completed in {agent_time:.2f} seconds")
505
  print(f"[LOG] Raw response length: {len(response_text)} characters")
506
 
 
 
 
 
 
 
 
 
 
 
 
 
507
  # Extract source IDs from the response (preserving order)
508
  unique_ids = extract_source_ids(response_text)
509
 
 
562
 
563
  # Convert to JSON
564
  cited_elements_json = json.dumps(cited_elements_ordered, ensure_ascii=False, indent=2)
565
+ answer_language = aswer_language_detection(response_text)
566
 
567
  total_time = time.time() - start_time
568
  print(f"[LOG] βœ… Processing completed in {total_time:.2f} seconds total")
569
  print(f"[LOG] Final response length: {len(sequential_response)} characters")
570
+ print(f"[LOG] Used fallback: {used_fallback}")
571
  print(f"[LOG] === QUESTION PROCESSING COMPLETED ===\n")
572
 
573
  return {
 
575
  "cited_elements_json": cited_elements_json,
576
  "unique_ids": unique_ids,
577
  "citation_mapping": source_id_to_number,
578
+ "answer_language": answer_language,
579
+ "used_fallback": used_fallback
580
  }
581
 
582
  except Exception as e:
 
589
  "cited_elements_json": "[]",
590
  "unique_ids": [],
591
  "citation_mapping": {},
592
+ "answer_language": "en",
593
+ "used_fallback": False
594
  }
595
 
596
+
597
def process_question_with_citations(agents_dict, question: str, chunks_directory="./data/") -> dict:
    """Legacy entry point kept for backward compatibility.

    Delegates to process_question_with_sequential_citations, which now
    carries the fallback-agent support.
    """
    print("[LOG] Using legacy function wrapper - redirecting to sequential citations with fallback")
    result = process_question_with_sequential_citations(agents_dict, question, chunks_directory)
    return result