NavyDevilDoc committed on
Commit
8170034
·
verified ·
1 Parent(s): dd05053

Update src/app.py

Browse files
Files changed (1) hide show
  1. src/app.py +37 -23
src/app.py CHANGED
@@ -209,30 +209,15 @@ tab1, tab2, tab3 = st.tabs(["💬 Chat Playground", "📂 Knowledge & Tools", "
209
 
210
  # === TAB 1: CHAT ===
211
  with tab1:
212
- st.header("Discussion & Analysis")
213
-
214
- # Chat History Downloader
215
- # Only show the button if there is actually a conversation to save
216
- if st.session_state.get("messages"):
217
- # 1. Format the history log
218
- chat_log = f"# ⚓ Navy AI Toolkit - Chat Log\n"
219
- chat_log += f"Date: {datetime.now().strftime('%Y-%m-%d %H:%M')}\n"
220
- chat_log += f"Model: {st.session_state.get('model_selector_radio', 'Unknown')}\n\n"
221
- chat_log += "---\n\n"
222
-
223
- for msg in st.session_state.messages:
224
- role = msg["role"].upper()
225
- content = msg["content"]
226
- chat_log += f"**{role}**: {content}\n\n"
227
-
228
- # 2. Render the Download Button (Right below header)
229
- st.download_button(
230
- label="💾 Download Conversation",
231
- data=chat_log,
232
- file_name=f"chat_session_{datetime.now().strftime('%Y%m%d_%H%M')}.md",
233
- mime="text/markdown"
234
- )
235
 
 
236
  if "messages" not in st.session_state: st.session_state.messages = []
237
 
238
  # RENDER DEBUG OVERLAY (If enabled in Admin)
@@ -241,11 +226,16 @@ with tab1:
241
  c1, c2 = st.columns([3, 1])
242
  with c1: st.caption(f"Active Model: **{st.session_state.get('model_selector_radio', 'Granite')}**")
243
  with c2: use_rag = st.toggle("Enable Knowledge Base", value=False)
 
 
244
  for msg in st.session_state.messages:
245
  with st.chat_message(msg["role"]): st.markdown(msg["content"])
 
 
246
  if prompt := st.chat_input("Input command..."):
247
  st.session_state.messages.append({"role": "user", "content": prompt})
248
  with st.chat_message("user"): st.markdown(prompt)
 
249
  context_txt = ""
250
  sys_p = "You are a helpful AI assistant."
251
  st.session_state.last_context_used = "" # Reset context debug
@@ -267,6 +257,7 @@ with tab1:
267
  context_txt += f"<document index='{i+1}' source='{src}'>\n{d.page_content}\n</document>\n"
268
  # Debug Capture
269
  st.session_state.last_context_used = context_txt
 
270
  if context_txt:
271
  final_prompt = f"User Question: {prompt}\n\n<context>\n{context_txt}\n</context>\n\nInstruction: Answer using the context above."
272
  else: final_prompt = prompt
@@ -280,10 +271,33 @@ with tab1:
280
  m_name = "GPT-4o" if "GPT-4o" in model_choice else model_choice.split()[0]
281
  tracker.log_usage(m_name, usage["input"], usage["output"])
282
  update_sidebar_metrics()
 
283
  st.session_state.messages.append({"role": "assistant", "content": resp})
 
284
  if use_rag and context_txt:
285
  with st.expander("📚 View Context Used"): st.text(context_txt)
286
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
287
  # === TAB 2: KNOWLEDGE & TOOLS ===
288
  with tab2:
289
  st.header("Document Processor")
 
209
 
210
  # === TAB 1: CHAT ===
211
  with tab1:
212
+ # 1. LAYOUT: Header + Placeholder for Download Button
213
+ col_header, col_btn = st.columns([6, 1])
214
+ with col_header:
215
+ st.header("Discussion & Analysis")
216
+
217
+ # Reserve a spot for the button so we can render it LATER (after the chat updates)
218
+ download_placeholder = col_btn.empty()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
219
 
220
+ # 2. CHAT LOGIC
221
  if "messages" not in st.session_state: st.session_state.messages = []
222
 
223
  # RENDER DEBUG OVERLAY (If enabled in Admin)
 
226
  c1, c2 = st.columns([3, 1])
227
  with c1: st.caption(f"Active Model: **{st.session_state.get('model_selector_radio', 'Granite')}**")
228
  with c2: use_rag = st.toggle("Enable Knowledge Base", value=False)
229
+
230
+ # Display existing history
231
  for msg in st.session_state.messages:
232
  with st.chat_message(msg["role"]): st.markdown(msg["content"])
233
+
234
+ # Handle New Input
235
  if prompt := st.chat_input("Input command..."):
236
  st.session_state.messages.append({"role": "user", "content": prompt})
237
  with st.chat_message("user"): st.markdown(prompt)
238
+
239
  context_txt = ""
240
  sys_p = "You are a helpful AI assistant."
241
  st.session_state.last_context_used = "" # Reset context debug
 
257
  context_txt += f"<document index='{i+1}' source='{src}'>\n{d.page_content}\n</document>\n"
258
  # Debug Capture
259
  st.session_state.last_context_used = context_txt
260
+
261
  if context_txt:
262
  final_prompt = f"User Question: {prompt}\n\n<context>\n{context_txt}\n</context>\n\nInstruction: Answer using the context above."
263
  else: final_prompt = prompt
 
271
  m_name = "GPT-4o" if "GPT-4o" in model_choice else model_choice.split()[0]
272
  tracker.log_usage(m_name, usage["input"], usage["output"])
273
  update_sidebar_metrics()
274
+
275
  st.session_state.messages.append({"role": "assistant", "content": resp})
276
+
277
  if use_rag and context_txt:
278
  with st.expander("📚 View Context Used"): st.text(context_txt)
279
 
280
+ # 3. LATE RENDER: Fill the Download Button Placeholder
281
+ # This runs AFTER the new message is appended, so the log is complete.
282
+ if st.session_state.messages:
283
+ chat_log = f"# ⚓ Navy AI Toolkit - Chat Log\n"
284
+ chat_log += f"Date: {datetime.now().strftime('%Y-%m-%d %H:%M')}\n"
285
+ chat_log += f"Model: {st.session_state.get('model_selector_radio', 'Unknown')}\n\n"
286
+ chat_log += "---\n\n"
287
+
288
+ for msg in st.session_state.messages:
289
+ role = msg["role"].upper()
290
+ content = msg["content"]
291
+ chat_log += f"**{role}**: {content}\n\n"
292
+
293
+ with download_placeholder:
294
+ st.download_button(
295
+ label="💾 Save",
296
+ data=chat_log,
297
+ file_name=f"chat_{datetime.now().strftime('%Y%m%d_%H%M')}.md",
298
+ mime="text/markdown"
299
+ )
300
+
301
  # === TAB 2: KNOWLEDGE & TOOLS ===
302
  with tab2:
303
  st.header("Document Processor")