Jaheen07 committed on
Commit
13772bf
·
verified ·
1 Parent(s): 01e863d

Update chatbot.py

Browse files
Files changed (1) hide show
  1. chatbot.py +29 -32
chatbot.py CHANGED
@@ -819,46 +819,43 @@ class RAGChatbot:
819
  return prompt
820
 
821
  def ask(self, question: str) -> str:
822
- """Ask a question to the chatbot with learning from past conversations"""
823
  if question.lower() in ["reset data", "reset"]:
824
  self.chat_history = []
825
  self.chat_embeddings = []
826
  self.chat_index = None
827
- self.conversation_context = {'current_employee': None, 'last_mentioned_entities': []} # ADD THIS LINE
828
  self._save_chat_history()
829
  return "Chat history has been reset."
830
-
831
- # ADD THIS LINE:
832
  resolved_question = self._resolve_pronouns(question)
833
-
834
- # CHANGE 'question' to 'resolved_question' in next line:
835
  pattern = self._extract_query_pattern(resolved_question)
836
  self.query_patterns[pattern] += 1
837
-
838
- # CHANGE 'question' to 'resolved_question':
839
- relevant_past_chats = self._search_chat_history(resolved_question, k=10)
840
-
841
- # CHANGE 'question' to 'resolved_question':
842
  retrieved_data = self._retrieve(resolved_question, k=20)
843
-
844
- # CHANGE 'question' to 'resolved_question':
845
  prompt = self._build_prompt(resolved_question, retrieved_data, relevant_past_chats)
846
-
847
- # Generate response
848
- messages = [{"role": "user", "content": prompt}]
849
-
850
- response = self.llm_client.chat.completions.create(
851
  model="meta-llama/Llama-3.1-8B-Instruct",
852
- messages=messages,
853
- max_tokens=512,
854
- temperature=0.3
855
  )
856
-
857
- answer = response.choices[0].message.content
858
-
859
- # ADD THIS LINE:
860
  self._update_conversation_context(question, answer)
861
-
862
  # Store in history with timestamp and metadata
863
  chat_entry = {
864
  'timestamp': datetime.now().isoformat(),
@@ -867,26 +864,26 @@ class RAGChatbot:
867
  'pattern': pattern,
868
  'used_past_context': len(relevant_past_chats) > 0
869
  }
870
-
871
  self.chat_history.append(chat_entry)
872
-
873
  # Update chat history index with new conversation
874
  new_text = f"Q: {question}\nA: {answer}"
875
  new_embedding = self.embeddings_model.encode([new_text])
876
-
877
  if self.chat_index is None:
878
  dimension = new_embedding.shape[1]
879
  self.chat_index = faiss.IndexFlatL2(dimension)
880
  self.chat_embeddings = new_embedding
881
  else:
882
  self.chat_embeddings = np.vstack([self.chat_embeddings, new_embedding])
883
-
884
  self.chat_index.add(np.array(new_embedding).astype('float32'))
885
-
886
  # Save to disk after each conversation
887
  self._save_chat_history()
888
  self._save_learning_stats()
889
-
890
  return answer
891
 
892
  def provide_feedback(self, question: str, rating: int):
 
819
  return prompt
820
 
821
  def ask(self, question: str) -> str:
822
+ """Ask a question to the chatbot with learning from past conversations"""
823
  if question.lower() in ["reset data", "reset"]:
824
  self.chat_history = []
825
  self.chat_embeddings = []
826
  self.chat_index = None
827
+ self.conversation_context = {'current_employee': None, 'last_mentioned_entities': []}
828
  self._save_chat_history()
829
  return "Chat history has been reset."
830
+
831
+ # Resolve pronouns before processing
832
  resolved_question = self._resolve_pronouns(question)
833
+
834
+ # Extract query pattern for learning
835
  pattern = self._extract_query_pattern(resolved_question)
836
  self.query_patterns[pattern] += 1
837
+
838
+ # Search through past conversations for similar questions
839
+ relevant_past_chats = self._search_chat_history(resolved_question, k=5)
840
+
841
+ # Retrieve relevant chunks (use resolved question for better retrieval)
842
  retrieved_data = self._retrieve(resolved_question, k=20)
843
+
844
+ # Build prompt with both document context and learned information
845
  prompt = self._build_prompt(resolved_question, retrieved_data, relevant_past_chats)
846
+
847
+ # CORRECT: Use text_generation for InferenceClient
848
+ answer = self.llm_client.text_generation(
849
+ prompt,
 
850
  model="meta-llama/Llama-3.1-8B-Instruct",
851
+ max_new_tokens=512,
852
+ temperature=0.3,
853
+ return_full_text=False
854
  )
855
+
856
+ # Update conversation context after each exchange
 
 
857
  self._update_conversation_context(question, answer)
858
+
859
  # Store in history with timestamp and metadata
860
  chat_entry = {
861
  'timestamp': datetime.now().isoformat(),
 
864
  'pattern': pattern,
865
  'used_past_context': len(relevant_past_chats) > 0
866
  }
867
+
868
  self.chat_history.append(chat_entry)
869
+
870
  # Update chat history index with new conversation
871
  new_text = f"Q: {question}\nA: {answer}"
872
  new_embedding = self.embeddings_model.encode([new_text])
873
+
874
  if self.chat_index is None:
875
  dimension = new_embedding.shape[1]
876
  self.chat_index = faiss.IndexFlatL2(dimension)
877
  self.chat_embeddings = new_embedding
878
  else:
879
  self.chat_embeddings = np.vstack([self.chat_embeddings, new_embedding])
880
+
881
  self.chat_index.add(np.array(new_embedding).astype('float32'))
882
+
883
  # Save to disk after each conversation
884
  self._save_chat_history()
885
  self._save_learning_stats()
886
+
887
  return answer
888
 
889
  def provide_feedback(self, question: str, rating: int):