Update app.py
app.py (CHANGED)

@@ -242,26 +242,40 @@ with st.form("chat_form", clear_on_submit=True):
 if st.session_state.chat_history and st.session_state.chat_history[-1][0] == "user":
     inputs = {"messages": st.session_state.chat_history}
 
-    # Create
-
-
+    # Create separate placeholders for tool output and final answer.
+    tool_output_placeholder = st.empty()
+    final_answer_placeholder = st.empty()
+
+    # Accumulators for each section.
+    tool_output_text = ""
+    final_answer_text = ""
 
     # Stream the agent's response chunk-by-chunk.
     for s in graph.stream(inputs, stream_mode="values"):
-        #
+        # Get the last message in the chunk.
         message = s["messages"][-1]
+
         if isinstance(message, tuple):
+            # This is a tool output message (e.g., retrieved docs or tool usage).
             role, text = message
+            tool_output_text += text
+            # Update the tool output area with a heading and the accumulated text.
+            tool_output_placeholder.markdown(
+                f"### Tool Output\n\n{tool_output_text}",
+                unsafe_allow_html=True
+            )
         else:
+            # This is the final answer generated by the AI.
             text = message.content
+            final_answer_text += text
+            # Update the final answer area with a heading and the accumulated text.
+            final_answer_placeholder.markdown(
+                f"### Final Answer\n\n{final_answer_text}",
+                unsafe_allow_html=True
+            )
 
-
-
-
-        # Update the placeholder with the current assistant message.
-        # Using unsafe_allow_html=True ensures that any markdown (or embedded HTML) is rendered.
-        response_placeholder.markdown(assistant_message, unsafe_allow_html=True)
+    # Once complete, combine both sections into the chat history.
+    combined_response = f"**Tool Output:**\n\n{tool_output_text}\n\n**Final Answer:**\n\n{final_answer_text}"
+    st.session_state.chat_history.append(("assistant", combined_response))
 
-    # Once complete, add the full assistant response to the chat history.
-    st.session_state.chat_history.append(("assistant", assistant_message))
 
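For reference, the placeholder pattern the new code relies on can be sketched in isolation. The snippet below is a minimal, self-contained stand-in, not the app's actual code: the chunks list simulates what graph.stream(inputs, stream_mode="values") yields (with stream_mode="values", LangGraph emits the full graph state after each step, so s["messages"][-1] is the newest message), and both branches are flattened to (role, text) tuples for simplicity, whereas the real app receives a LangChain message object with a .content attribute in the else branch.

import streamlit as st

# Hypothetical chunks standing in for graph.stream(inputs, stream_mode="values").
# Each chunk is the full state after one step; the last message is the newest.
chunks = [
    {"messages": [("tool", "Retrieved 3 documents.")]},
    {"messages": [("tool", "Retrieved 3 documents."),
                  ("assistant", "Here is the answer, based on those documents.")]},
]

# st.empty() reserves a fixed slot on the page; each later call to
# .markdown() on the placeholder replaces the slot's previous content
# instead of appending a new element below it.
tool_output_placeholder = st.empty()
final_answer_placeholder = st.empty()

tool_output_text = ""
final_answer_text = ""

for s in chunks:
    role, text = s["messages"][-1]
    if role == "tool":
        tool_output_text += text
        tool_output_placeholder.markdown(f"### Tool Output\n\n{tool_output_text}")
    else:
        final_answer_text += text
        final_answer_placeholder.markdown(f"### Final Answer\n\n{final_answer_text}")

Because each placeholder is overwritten in place rather than appended to, the page always shows one Tool Output block and one Final Answer block that grow as chunks arrive; this is what lets the change split the single streaming area into two stable sections.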