hamxaameer commited on
Commit
3e60f9f
·
verified ·
1 Parent(s): bd2bde4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -22
app.py CHANGED
@@ -406,15 +406,17 @@ Provide a complete, detailed answer (150-250 words):"""
406
  logger.error(f" βœ— Generation error: {e}")
407
  return None
408
 
409
- def synthesize_direct_answer(
410
- # Removed synthetic fallback - only use LLM
 
 
411
  def generate_answer_langchain(
412
  query: str,
413
  vectorstore,
414
  llm_client
415
  ) -> str:
416
  """
417
- Main RAG pipeline: Retrieve β†’ Generate β†’ Fallback
418
  """
419
  logger.info(f"\n{'='*80}")
420
  logger.info(f"Processing query: '{query}'")
@@ -442,10 +444,10 @@ def generate_answer_langchain(
442
  else:
443
  logger.warning(f" β†’ Attempt {attempt}/4 failed, retrying...")
444
 
445
- # Step 3: Fallback if all attempts fail
446
  if not llm_answer:
447
- logger.error(f" βœ— All 4 LLM attempts failed - using enhanced fallback")
448
- llm_answer = synthesize_direct_answer(query, retrieved_docs)
449
 
450
  return llm_answer
451
 
@@ -470,12 +472,12 @@ def fashion_chatbot(message: str, history: List[List[str]]):
470
  message.strip(),
471
  vectorstore,
472
  top_k=CONFIG["top_k"]
473
- # Step 3: If all attempts fail, return error
474
- if not llm_answer:
475
- logger.error(f" βœ— All 4 LLM attempts failed")
476
- return "I apologize, but I'm having trouble generating a response. Please try rephrasing your question or ask something else."
477
-
478
- return llm_answer
479
  # Show generating indicator
480
  yield f"πŸ’­ Generating answer ({len(retrieved_docs)} sources found)..."
481
 
@@ -488,12 +490,11 @@ def fashion_chatbot(message: str, history: List[List[str]]):
488
  if llm_answer:
489
  break
490
 
491
- # Fallback if needed
492
  if not llm_answer:
493
- logger.error(f" βœ— All LLM attempts failed - using enhanced fallback")
494
- llm_answer = synthesize_direct_answer(message.strip(), retrieved_docs)
495
- # Notify user about the issue
496
- yield "⚠️ LLM temporarily unavailable, providing knowledge-based answer...\n\n"
497
 
498
  # Stream the answer word by word for natural flow
499
  import time
@@ -515,11 +516,12 @@ def fashion_chatbot(message: str, history: List[List[str]]):
515
  # ============================================================================
516
  # INITIALIZE AND LAUNCH
517
  # ============================================================================
518
- # If LLM fails, show error
519
- if not llm_answer:
520
- logger.error(f" βœ— All LLM attempts failed")
521
- yield "I apologize, but I'm having trouble generating a response. Please try rephrasing your question."
522
- return
 
523
  def startup():
524
  """Initialize all models and load vector store"""
525
  global llm_client, embeddings, vectorstore
 
406
  logger.error(f" βœ— Generation error: {e}")
407
  return None
408
 
409
+ # ============================================================================
410
+ # MAIN RAG FUNCTION
411
+ # ============================================================================
412
+
413
  def generate_answer_langchain(
414
  query: str,
415
  vectorstore,
416
  llm_client
417
  ) -> str:
418
  """
419
+ Main RAG pipeline: Retrieve β†’ Generate (no fallback)
420
  """
421
  logger.info(f"\n{'='*80}")
422
  logger.info(f"Processing query: '{query}'")
 
444
  else:
445
  logger.warning(f" β†’ Attempt {attempt}/4 failed, retrying...")
446
 
447
+ # Step 3: If all attempts fail, return error
448
  if not llm_answer:
449
+ logger.error(f" βœ— All 4 LLM attempts failed")
450
+ return "I apologize, but I'm having trouble generating a response. Please try rephrasing your question or ask something else."
451
 
452
  return llm_answer
453
 
 
472
  message.strip(),
473
  vectorstore,
474
  top_k=CONFIG["top_k"]
475
+ )
476
+
477
+ if not retrieved_docs:
478
+ yield "I couldn't find relevant information to answer your question."
479
+ return
480
+
481
  # Show generating indicator
482
  yield f"πŸ’­ Generating answer ({len(retrieved_docs)} sources found)..."
483
 
 
490
  if llm_answer:
491
  break
492
 
493
+ # If LLM fails, show error
494
  if not llm_answer:
495
+ logger.error(f" βœ— All LLM attempts failed")
496
+ yield "I apologize, but I'm having trouble generating a response. Please try rephrasing your question."
497
+ return
 
498
 
499
  # Stream the answer word by word for natural flow
500
  import time
 
516
  # ============================================================================
517
  # INITIALIZE AND LAUNCH
518
  # ============================================================================
519
+
520
+ # Global variables
521
+ llm_client = None
522
+ embeddings = None
523
+ vectorstore = None
524
+
525
  def startup():
526
  """Initialize all models and load vector store"""
527
  global llm_client, embeddings, vectorstore