Nada committed on
Commit
540d76a
·
1 Parent(s): 75ab808
Files changed (1) hide show
  1. chatbot.py +21 -11
chatbot.py CHANGED
@@ -846,19 +846,29 @@ Would you like to connect with a professional now, or would you prefer to keep t
846
 
847
  # Generate a follow-up question if the response is too short
848
  if len(response_text.split()) < 20 and not response_text.endswith('?'):
849
- follow_up_prompt = f"""Based on the conversation so far:
 
850
  {chr(10).join([f"{msg['role']}: {msg['text']}" for msg in conversation_history[-3:]])}
851
 
852
- Generate a thoughtful follow-up question that:
853
- 1. Shows you're actively listening
854
- 2. Encourages deeper exploration
855
- 3. Maintains therapeutic rapport
856
- 4. Is open-ended and non-judgmental
857
-
858
- Respond with just the question."""
859
-
860
- follow_up = self.llm.invoke(follow_up_prompt)
861
- response_text += f"\n\n{follow_up}"
 
 
 
 
 
 
 
 
 
862
 
863
  # assistant response -> conversation history
864
  assistant_message = Message(
 
846
 
847
  # Generate a follow-up question if the response is too short
848
  if len(response_text.split()) < 20 and not response_text.endswith('?'):
849
+ follow_up_prompt = f"""
850
+ Recent conversation:
851
  {chr(10).join([f"{msg['role']}: {msg['text']}" for msg in conversation_history[-3:]])}
852
 
853
+ Now, write a single empathetic and open-ended question to encourage the user to share more.
854
+ Respond with just the question, no explanation.
855
+ """
856
+ follow_up = self.llm.invoke(follow_up_prompt).strip()
857
+ # Clean and extract only the actual question (first sentence ending with '?')
858
+ matches = re.findall(r'([^\n.?!]*\?)', follow_up)
859
+ if matches:
860
+ question = matches[0].strip()
861
+ else:
862
+ question = follow_up.strip().split('\n')[0]
863
+ # If the main response is very short, return just the question
864
+ if len(response_text.split()) < 5:
865
+ response_text = question
866
+ else:
867
+ response_text = f"{response_text}\n\n{question}"
868
+
869
+ # Final post-processing: remove any LLM commentary that may have leaked in
870
+ response_text = response_text.strip()
871
+ response_text = re.sub(r"(Your response|This response).*", "", response_text, flags=re.IGNORECASE).strip()
872
 
873
  # assistant response -> conversation history
874
  assistant_message = Message(