cryogenic22 committed on
Commit
80c5436
·
verified ·
1 Parent(s): a56e4f2

Update components/chat.py

Browse files
Files changed (1) hide show
  1. components/chat.py +20 -5
components/chat.py CHANGED
@@ -6,7 +6,8 @@ from utils.database import verify_vector_store
6
  from threading import Lock
7
  from typing import Optional
8
  import traceback
9
- from utils.response_formatter import ResponseFormatter, display_formatted_response
 
10
 
11
  # Create a lock for QA system access
12
  qa_lock = Lock()
@@ -38,15 +39,18 @@ def _verify_chat_ready() -> bool:
38
  return False
39
  return True
40
 
 
41
  def _process_user_message(prompt: str):
42
  """Process a new user message and generate AI response."""
43
  try:
44
  with st.spinner("Analyzing..."):
 
45
  human_message = HumanMessage(content=prompt)
46
  st.session_state.messages.append(human_message)
47
  with st.chat_message("user"):
48
  st.write(prompt)
49
 
 
50
  with qa_lock:
51
  response = st.session_state.qa_system.invoke({
52
  "input": prompt,
@@ -54,14 +58,25 @@ def _process_user_message(prompt: str):
54
  })
55
 
56
  if response:
57
- # Format the response
58
- ai_message = AIMessage(content=str(response))
 
 
 
 
 
 
59
  st.session_state.messages.append(ai_message)
 
60
  with st.chat_message("assistant"):
61
- display_formatted_response(
62
  str(response),
63
- metadata=getattr(response, 'metadata', None)
64
  )
 
 
 
 
65
 
66
  except Exception as e:
67
  st.error(f"An error occurred while processing your message: {str(e)}")
 
6
  from threading import Lock
7
  from typing import Optional
8
  import traceback
9
+ from utils.response_formatter import ResponseFormatter, display_enhanced_response
10
+
11
 
12
  # Create a lock for QA system access
13
  qa_lock = Lock()
 
39
  return False
40
  return True
41
 
42
+
43
  def _process_user_message(prompt: str):
44
  """Process a new user message and generate AI response."""
45
  try:
46
  with st.spinner("Analyzing..."):
47
+ # Create and display user message
48
  human_message = HumanMessage(content=prompt)
49
  st.session_state.messages.append(human_message)
50
  with st.chat_message("user"):
51
  st.write(prompt)
52
 
53
+ # Generate AI response with source tracking
54
  with qa_lock:
55
  response = st.session_state.qa_system.invoke({
56
  "input": prompt,
 
58
  })
59
 
60
  if response:
61
+ # Extract sources from response metadata
62
+ sources = response.metadata.get('sources', []) if hasattr(response, 'metadata') else []
63
+
64
+ # Create and display AI message with enhanced formatting
65
+ ai_message = AIMessage(
66
+ content=str(response),
67
+ additional_kwargs={'sources': sources}
68
+ )
69
  st.session_state.messages.append(ai_message)
70
+
71
  with st.chat_message("assistant"):
72
+ display_enhanced_response(
73
  str(response),
74
+ sources=sources
75
  )
76
+
77
+ st.rerun()
78
+ else:
79
+ st.error("No response received. Please try again.")
80
 
81
  except Exception as e:
82
  st.error(f"An error occurred while processing your message: {str(e)}")