NavyDevilDoc committed on
Commit
ef8b779
·
verified ·
1 Parent(s): 5aa9482

Update src/app.py

Browse files
Files changed (1) hide show
  1. src/app.py +8 -573
src/app.py CHANGED
@@ -108,7 +108,8 @@ with st.sidebar:
108
  model_choice = st.radio(
109
  "Choose your Intelligence:",
110
  model_options,
111
- captions=model_captions
 
112
  )
113
  st.info(f"Connected to: **{model_choice}**")
114
 
@@ -140,9 +141,6 @@ def query_local_model(messages, max_tokens, model_name):
140
  url = API_URL_ROOT + "/generate"
141
 
142
  # --- FLATTEN MESSAGE HISTORY ---
143
- # Since the backend expects a single string ("text"), we format the history here.
144
- # We extract the system persona separately to pass to the 'persona' field.
145
-
146
  formatted_history = ""
147
  system_persona = "You are a helpful assistant." # Default
148
 
@@ -158,7 +156,7 @@ def query_local_model(messages, max_tokens, model_name):
158
  formatted_history += "Assistant: "
159
 
160
  payload = {
161
- "text": formatted_history, # <--- History goes here
162
  "persona": system_persona,
163
  "max_tokens": max_tokens,
164
  "model": model_name
@@ -208,7 +206,6 @@ def clean_text(text):
208
 
209
  def ask_ai(user_prompt, system_persona, max_tokens):
210
  # 1. Standardize Input: Convert the strings into the Message List format
211
- # This ensures compatibility with our new memory-aware backend functions
212
  messages_payload = [
213
  {"role": "system", "content": system_persona},
214
  {"role": "user", "content": user_prompt}
@@ -216,12 +213,9 @@ def ask_ai(user_prompt, system_persona, max_tokens):
216
 
217
  # 2. Routing Logic
218
  if "GPT-4o" in model_choice:
219
- # CORRECTED: Now calls the OpenAI function
220
  return query_openai_model(messages_payload, max_tokens)
221
  else:
222
- # Lookup the technical name for Ollama
223
  technical_name = model_map[model_choice]
224
- # Calls the Local function
225
  return query_local_model(messages_payload, max_tokens, technical_name)
226
 
227
  # --- MAIN UI ---
@@ -283,15 +277,12 @@ with tab1:
283
  st.session_state.email_draft = reply
284
 
285
  if usage:
286
- # 1. Determine a clean name for the log
287
  if "GPT-4o" in model_choice:
288
  m_name = "GPT-4o"
289
  else:
290
- # Use the first word of the model choice (e.g., "Llama", "Gemma", "Granite")
291
  m_name = model_choice.split(" ")[0]
292
- # 2. Log it
293
  tracker.log_usage(m_name, usage["input"], usage["output"])
294
- update_sidebar_metrics() # Force update
295
 
296
  if st.session_state.email_draft:
297
  st.subheader("Draft Result")
@@ -306,362 +297,20 @@ with tab2:
306
  st.session_state.messages = []
307
 
308
  # --- CONTROLS AND METRICS ---
309
- # The controls are kept outside the chat loop.
310
  c1, c2, c3 = st.columns([2, 1, 1])
311
  with c1:
312
- # Use the global model_choice from the sidebar/tab1 initialization
313
  selected_model_name = st.session_state.get('model_choice', 'Granite 4 (IBM)')
314
  with c2:
315
  use_rag = st.toggle("🔌 Enable Knowledge Base", value=False)
316
- # The token progress bar will be handled inside the prompt logic based on input length
317
-
318
- with c3:
319
- # --- NEW FEATURE: DOWNLOAD CHAT ---
320
- # Convert history to a readable string
321
- chat_log = ""
322
- for msg in st.session_state.messages:
323
- role = "USER" if msg['role'] == 'user' else "ASSISTANT"
324
- chat_log += f"[{role}]: {msg['content']}\n\n"
325
-
326
- # Only show button if there is history to save
327
- if chat_log:
328
- st.download_button(
329
- label="💾 Save Chat",
330
- data=chat_log,
331
- file_name="mission_log.txt",
332
- mime="text/plain",
333
- help="Download the current conversation history."
334
- )
335
-
336
- st.divider()
337
-
338
- # --- DISPLAY CONVERSATION HISTORY ---
339
- for message in st.session_state.messages:
340
- with st.chat_message(message["role"]):
341
- st.markdown(message["content"])
342
-
343
- # --- CHAT INPUT HANDLING import streamlit as st
344
- import requests
345
- import os
346
- import unicodedata
347
- import resources # Assuming this file exists in your repo
348
- import tracker
349
- import rag_engine # Now safe to import at top level (lazy loading enabled)
350
- from openai import OpenAI
351
- from datetime import datetime
352
-
353
- # --- CONFIGURATION ---
354
- st.set_page_config(page_title="Navy AI Toolkit", page_icon="⚓", layout="wide")
355
-
356
- # 1. SETUP CREDENTIALS
357
- API_URL_ROOT = os.getenv("API_URL") # For Ollama models
358
- OPENAI_KEY = os.getenv("OPENAI_API_KEY") # For GPT-4o
359
-
360
- # --- INITIALIZATION ---
361
- if "roles" not in st.session_state:
362
- st.session_state.roles = []
363
-
364
- # --- LOGIN / REGISTER LOGIC ---
365
- if "authentication_status" not in st.session_state or st.session_state["authentication_status"] is None:
366
- # If not logged in, show tabs
367
- login_tab, register_tab = st.tabs(["🔐 Login", "📝 Register"])
368
-
369
- with login_tab:
370
- is_logged_in = tracker.check_login()
371
- # FIX: Trigger User DB Download ONLY on fresh login
372
- if is_logged_in:
373
- tracker.download_user_db(st.session_state.username)
374
- st.rerun() # Refresh to show the app
375
-
376
- with register_tab:
377
- st.header("Create Account")
378
- with st.form("reg_form"):
379
- new_user = st.text_input("Username")
380
- new_name = st.text_input("Display Name")
381
- new_email = st.text_input("Email")
382
- new_pwd = st.text_input("Password", type="password")
383
- invite = st.text_input("Invitation Passcode")
384
-
385
- if st.form_submit_button("Register"):
386
- success, msg = tracker.register_user(new_email, new_user, new_name, new_pwd, invite)
387
- if success:
388
- st.success(msg)
389
- else:
390
- st.error(msg)
391
-
392
- # Stop execution if not logged in
393
- if not st.session_state.get("authentication_status"):
394
- st.stop()
395
-
396
- # --- GLOBAL PLACEHOLDERS ---
397
- metric_placeholder = None
398
- admin_metric_placeholder = None
399
-
400
- # --- SIDEBAR (CONSOLIDATED) ---
401
- with st.sidebar:
402
- st.header("👤 User Profile")
403
- st.write(f"Welcome, **{st.session_state.name}**")
404
-
405
- st.header("📊 Usage Tracker")
406
- metric_placeholder = st.empty()
407
-
408
- # Admin Tools
409
- if "admin" in st.session_state.roles:
410
- st.divider()
411
- st.header("🛡️ Admin Tools")
412
- admin_metric_placeholder = st.empty()
413
-
414
- # FIX: Point to the correct persistence path
415
- log_path = tracker.get_log_path()
416
- if log_path.exists():
417
- with open(log_path, "r") as f:
418
- log_data = f.read()
419
- st.download_button(
420
- label="📥 Download Usage Logs",
421
- data=log_data,
422
- file_name=f"usage_log_{datetime.now().strftime('%Y-%m-%d')}.json",
423
- mime="application/json"
424
- )
425
- else:
426
- st.warning("No logs found yet.")
427
-
428
- st.divider()
429
-
430
- # --- MODEL SELECTOR ---
431
- st.header("🧠 Model Selector")
432
-
433
- model_map = {
434
- "Granite 4 (IBM)": "granite4:latest",
435
- "Llama 3.2 (Meta)": "llama3.2:latest",
436
- "Gemma 3 (Google)": "gemma3:latest"
437
- }
438
-
439
- model_options = list(model_map.keys())
440
- model_captions = ["Slower for now, but free and private" for _ in model_options]
441
-
442
- if "admin" in st.session_state.roles:
443
- model_options.append("GPT-4o (Omni)")
444
- model_captions.append("Fast, smart, sends data to OpenAI")
445
-
446
- model_choice = st.radio(
447
- "Choose your Intelligence:",
448
- model_options,
449
- captions=model_captions
450
- )
451
- st.info(f"Connected to: **{model_choice}**")
452
-
453
- st.divider()
454
- st.header("⚙️ Controls")
455
- max_len = st.slider("Max Response Length (Tokens)", 100, 2000, 500)
456
-
457
- # --- HELPER FUNCTIONS ---
458
- def update_sidebar_metrics():
459
- """Refreshes the global placeholders defined in the sidebar."""
460
- if metric_placeholder is None:
461
- return
462
-
463
- stats = tracker.get_daily_stats()
464
- user_stats = stats["users"].get(st.session_state.username, {"input":0, "output":0})
465
-
466
- metric_placeholder.metric("My Tokens Today", user_stats["input"] + user_stats["output"])
467
-
468
- if "admin" in st.session_state.roles and admin_metric_placeholder is not None:
469
- admin_metric_placeholder.metric("Team Total Today", stats["total_tokens"])
470
-
471
- # Call metrics once on load
472
- update_sidebar_metrics()
473
-
474
- def query_local_model(messages, max_tokens, model_name):
475
- if not API_URL_ROOT:
476
- return "Error: API_URL not set.", None
477
-
478
- url = API_URL_ROOT + "/generate"
479
-
480
- # --- FLATTEN MESSAGE HISTORY ---
481
- # Since the backend expects a single string ("text"), we format the history here.
482
- # We extract the system persona separately to pass to the 'persona' field.
483
-
484
- formatted_history = ""
485
- system_persona = "You are a helpful assistant." # Default
486
-
487
- for msg in messages:
488
- if msg['role'] == 'system':
489
- system_persona = msg['content']
490
- elif msg['role'] == 'user':
491
- formatted_history += f"User: {msg['content']}\n"
492
- elif msg['role'] == 'assistant':
493
- formatted_history += f"Assistant: {msg['content']}\n"
494
-
495
- # Append the "Assistant:" prompt at the end to cue the model
496
- formatted_history += "Assistant: "
497
-
498
- payload = {
499
- "text": formatted_history, # <--- History goes here
500
- "persona": system_persona,
501
- "max_tokens": max_tokens,
502
- "model": model_name
503
- }
504
-
505
- try:
506
- response = requests.post(url, json=payload, timeout=300)
507
-
508
- if response.status_code == 200:
509
- response_data = response.json()
510
- ans = response_data.get("response", "")
511
- usage = response_data.get("usage", {"input":0, "output":0})
512
- return ans, usage
513
-
514
- return f"Error {response.status_code}: {response.text}", None
515
-
516
- except Exception as e:
517
- return f"Connection Error: {e}", None
518
-
519
- def query_openai_model(messages, max_tokens):
520
- if not OPENAI_KEY:
521
- return "Error: OPENAI_API_KEY not set.", None
522
-
523
- client = OpenAI(api_key=OPENAI_KEY)
524
 
525
- try:
526
- response = client.chat.completions.create(
527
- model="gpt-4o",
528
- max_tokens=max_tokens,
529
- messages=messages,
530
- temperature=0.3
531
- )
532
- usage_obj = response.usage
533
- usage_dict = {"input": usage_obj.prompt_tokens, "output": usage_obj.completion_tokens}
534
- return response.choices[0].message.content, usage_dict
535
-
536
- except Exception as e:
537
- return f"OpenAI Error: {e}", None
538
-
539
- def clean_text(text):
540
- if not text: return ""
541
- text = unicodedata.normalize('NFKC', text)
542
- replacements = {'“': '"', '”': '"', '‘': "'", '’': "'", '–': '-', '—': '-', '…': '...', '\u00a0': ' '}
543
- for old, new in replacements.items():
544
- text = text.replace(old, new)
545
- return text.strip()
546
-
547
- def ask_ai(user_prompt, system_persona, max_tokens):
548
- # 1. Standardize Input: Convert the strings into the Message List format
549
- # This ensures compatibility with our new memory-aware backend functions
550
- messages_payload = [
551
- {"role": "system", "content": system_persona},
552
- {"role": "user", "content": user_prompt}
553
- ]
554
-
555
- # 2. Routing Logic
556
- if "GPT-4o" in model_choice:
557
- # CORRECTED: Now calls the OpenAI function
558
- return query_openai_model(messages_payload, max_tokens)
559
- else:
560
- # Lookup the technical name for Ollama
561
- technical_name = model_map[model_choice]
562
- # Calls the Local function
563
- return query_local_model(messages_payload, max_tokens, technical_name)
564
-
565
- # --- MAIN UI ---
566
- st.title("AI Toolkit")
567
- tab1, tab2, tab3, tab4 = st.tabs(["📧 Email Builder", "💬 Chat Playground", "🛠️ Prompt Architect", "📚 Knowledge Base"])
568
-
569
- # --- TAB 1: EMAIL BUILDER ---
570
- with tab1:
571
- st.header("Structured Email Generator")
572
- if "email_draft" not in st.session_state:
573
- st.session_state.email_draft = ""
574
-
575
- st.subheader("1. Define the Voice")
576
- style_mode = st.radio("How should the AI write?", ["Use a Preset Persona", "Mimic My Style"], horizontal=True)
577
-
578
- selected_persona_instruction = ""
579
- if style_mode == "Use a Preset Persona":
580
- persona_name = st.selectbox("Select a Persona", list(resources.TONE_LIBRARY.keys()))
581
- selected_persona_instruction = resources.TONE_LIBRARY[persona_name]
582
- st.info(f"**System Instruction:** {selected_persona_instruction}")
583
- else:
584
- st.info("Upload 1-3 text files of your previous emails.")
585
- uploaded_style_files = st.file_uploader("Upload Samples (.txt)", type=["txt"], accept_multiple_files=True)
586
- if uploaded_style_files:
587
- style_context = ""
588
- for uploaded_file in uploaded_style_files:
589
- string_data = uploaded_file.read().decode("utf-8")
590
- style_context += f"---\n{string_data}\n---\n"
591
- selected_persona_instruction = f"Analyze these examples and mimic the style:\n{style_context}"
592
-
593
- st.divider()
594
- st.subheader("2. Details")
595
- c1, c2 = st.columns(2)
596
- with c1: recipient = st.text_input("Recipient")
597
- with c2: topic = st.text_input("Topic")
598
-
599
- st.caption("Content Source")
600
- input_method = st.toggle("Upload notes file?")
601
- raw_notes = ""
602
- if input_method:
603
- notes_file = st.file_uploader("Upload Notes (.txt)", type=["txt"])
604
- if notes_file: raw_notes = notes_file.read().decode("utf-8")
605
- else:
606
- raw_notes = st.text_area("Paste notes:", height=150)
607
-
608
- # Context Bar
609
- est_tokens = len(raw_notes) / 4
610
- st.progress(min(est_tokens / 128000, 1.0), text=f"Context: {int(est_tokens)} tokens")
611
-
612
- if st.button("Draft Email", type="primary"):
613
- if not raw_notes:
614
- st.warning("Please provide notes.")
615
- else:
616
- clean_notes = clean_text(raw_notes)
617
- with st.spinner(f"Drafting with {model_choice}..."):
618
- prompt = f"TASK: Write email.\nTO: {recipient}\nTOPIC: {topic}\nSTYLE: {selected_persona_instruction}\nDATA: {clean_notes}"
619
-
620
- reply, usage = ask_ai(prompt, "You are an expert ghostwriter.", max_len)
621
- st.session_state.email_draft = reply
622
-
623
- if usage:
624
- # 1. Determine a clean name for the log
625
- if "GPT-4o" in model_choice:
626
- m_name = "GPT-4o"
627
- else:
628
- # Use the first word of the model choice (e.g., "Llama", "Gemma", "Granite")
629
- m_name = model_choice.split(" ")[0]
630
- # 2. Log it
631
- tracker.log_usage(m_name, usage["input"], usage["output"])
632
- update_sidebar_metrics() # Force update
633
-
634
- if st.session_state.email_draft:
635
- st.subheader("Draft Result")
636
- st.text_area("Copy your email:", value=st.session_state.email_draft, height=300)
637
-
638
- # --- TAB 2: CHAT PLAYGROUND ---
639
- with tab2:
640
- st.header("Choose Your Model and Start a Discussion")
641
-
642
- # --- INITIALIZE CHAT MEMORY (MUST BE DONE FIRST) ---
643
- if "messages" not in st.session_state:
644
- st.session_state.messages = []
645
-
646
- # --- CONTROLS AND METRICS ---
647
- # The controls are kept outside the chat loop.
648
- c1, c2, c3 = st.columns([2, 1, 1])
649
- with c1:
650
- # Use the global model_choice from the sidebar/tab1 initialization
651
- selected_model_name = st.session_state.get('model_choice', 'Granite 4 (IBM)')
652
- with c2:
653
- use_rag = st.toggle("🔌 Enable Knowledge Base", value=False)
654
- # The token progress bar will be handled inside the prompt logic based on input length
655
-
656
  with c3:
657
  # --- NEW FEATURE: DOWNLOAD CHAT ---
658
- # Convert history to a readable string
659
  chat_log = ""
660
  for msg in st.session_state.messages:
661
  role = "USER" if msg['role'] == 'user' else "ASSISTANT"
662
  chat_log += f"[{role}]: {msg['content']}\n\n"
663
 
664
- # Only show button if there is history to save
665
  if chat_log:
666
  st.download_button(
667
  label="💾 Save Chat",
@@ -678,7 +327,7 @@ with tab2:
678
  with st.chat_message(message["role"]):
679
  st.markdown(message["content"])
680
 
681
- # --- CHAT INPUT HANDLING ---
682
  if prompt := st.chat_input("Ask a question..."):
683
  # 1. Display User Message and save to history
684
  st.session_state.messages.append({"role": "user", "content": prompt})
@@ -729,17 +378,13 @@ with tab2:
729
  recent_history = st.session_state.messages[-(history_depth+1):-1]
730
  messages_payload.extend(recent_history)
731
 
732
- # Add the final (potentially augmented) user message
733
  messages_payload.append({"role": "user", "content": final_user_content})
734
 
735
  # 5. Generate Response
736
  with st.chat_message("assistant"):
737
  with st.spinner(f"Thinking with {selected_model_name}..."):
738
- # Determine max_len
739
- # (Ensure max_len is defined in the script scope or use st.session_state if needed.
740
- # In your code it's defined in the sidebar, so it should be accessible here)
741
-
742
- # --- MODEL MAPPING ---
743
  model_id = ""
744
  ollama_map = {
745
  "Granite 4 (IBM)": "granite4:latest",
@@ -780,215 +425,6 @@ with tab2:
780
  st.caption(f"Rank {i+1} (Source: {src}, Rel: {score})")
781
  st.text(doc.page_content)
782
  st.divider()
783
- # --- TAB 3: PROMPT ARCHITECT ---
784
- with tab3:
785
- st.header("🛠️ Mega-Prompt Factory")
786
- st.info("Build standard templates for NIPRGPT.")
787
-
788
- c1, c2 = st.columns([1,1])
789
- with c1:
790
- st.subheader("1. Parameters")
791
- p = st.text_area("Persona", placeholder="Act as...", height=100)
792
- c = st.text_area("Context", placeholder="Background...", height=100)
793
- t = st.text_area("Task", placeholder="Action...", height=100)
794
- v = st.text_input("Placeholder Name", value="PASTE_DATA_HERE")
795
-
796
- with c2:
797
- st.subheader("2. Result")
798
- final = f"### ROLE\n{p}\n### CONTEXT\n{c}\n### TASK\n{t}\n### INPUT DATA\n\"\"\"\n[{v}]\n\"\"\""
799
- st.code(final, language="markdown")
800
- st.download_button("💾 Download .txt", final, "template.txt")
801
-
802
- # --- TAB 4: KNOWLEDGE BASE ---
803
- with tab4:
804
- st.header("🧠 Personal Knowledge Base")
805
- st.info(f"Managing knowledge for: **{st.session_state.username}**")
806
-
807
- # We no longer check 'is_admin' for the whole tab
808
- kb_tab1, kb_tab2 = st.tabs(["📤 Add Documents", "🗂️ Manage Database"])
809
-
810
- # --- SUB-TAB 1: UPLOAD ---
811
- with kb_tab1:
812
- st.subheader("Ingest New Knowledge")
813
- uploaded_file = st.file_uploader("Upload Instructions, Manuals, or Logs", type=["pdf", "docx", "txt", "md"])
814
-
815
- col1, col2 = st.columns([1, 2])
816
- with col1:
817
- chunk_strategy = st.selectbox(
818
- "Chunking Strategy",
819
- ["paragraph", "token", "page"],
820
- help="Paragraph: Manuals. Token: Dense text. Page: Forms."
821
- )
822
-
823
- if uploaded_file and st.button("Process & Add"):
824
- with st.spinner("Analyzing and Indexing..."):
825
- # 1. Save temp file
826
- temp_path = rag_engine.save_uploaded_file(uploaded_file)
827
-
828
- # 2. Process into USER'S specific DB (st.session_state.username)
829
- success, msg = rag_engine.process_and_add_document(
830
- temp_path,
831
- st.session_state.username,
832
- chunk_strategy
833
- )
834
-
835
- if success:
836
- st.success(msg)
837
- st.rerun()
838
- else:
839
- st.error(f"Failed: {msg}")
840
-
841
- st.divider()
842
- st.subheader("🔎 Quick Test")
843
- test_query = st.text_input("Ask your brain something...")
844
- if test_query:
845
- results = rag_engine.search_knowledge_base(test_query, st.session_state.username)
846
- if not results:
847
- st.warning("No matches found.")
848
- for i, doc in enumerate(results):
849
- src_name = os.path.basename(doc.metadata.get('source', '?'))
850
- score = doc.metadata.get('relevance_score', 'N/A')
851
- with st.expander(f"Match {i+1}: {src_name} (Score: {score})"):
852
- st.write(doc.page_content)
853
-
854
- # --- SUB-TAB 2: MANAGE (Unlocked for Everyone) ---
855
- with kb_tab2:
856
- st.subheader("🗄️ Database Inventory")
857
-
858
- docs = rag_engine.list_documents(st.session_state.username)
859
-
860
- if not docs:
861
- st.info("Your Knowledge Base is empty.")
862
- else:
863
- st.markdown(f"**Total Documents:** {len(docs)}")
864
-
865
- for doc in docs:
866
- c1, c2, c3, c4 = st.columns([3, 2, 1, 1])
867
- with c1:
868
- st.text(f"📄 {doc['filename']}")
869
- with c2:
870
- # FIX: Show strategy
871
- st.caption(f"⚙️ {doc.get('strategy', 'Unknown')}")
872
- with c3:
873
- st.caption(f"{doc['chunks']}")
874
- with c4:
875
- if st.button("🗑️", key=doc['source'], help="Delete Document"):
876
- with st.spinner("Deleting..."):
877
- success, msg = rag_engine.delete_document(st.session_state.username, doc['source'])
878
- if success:
879
- st.success(msg)
880
- st.rerun()
881
- else:
882
- st.error(msg)
883
-
884
- st.divider()
885
- with st.expander("🚨 Danger Zone"):
886
- # Allow ANY user to reset their OWN database
887
- if st.button("☢️ RESET MY DATABASE", type="primary"):
888
- success, msg = rag_engine.reset_knowledge_base(st.session_state.username)
889
- if success:
890
- st.success(msg)
891
- st.rerun()
892
- # --- CHAT INPUT HANDLING ---
893
- if prompt := st.chat_input("Ask a question..."):
894
- # 1. Display User Message and save to history
895
- st.session_state.messages.append({"role": "user", "content": prompt})
896
- with st.chat_message("user"):
897
- st.markdown(prompt)
898
-
899
- # 2. Initialize the Payload with System Persona
900
- system_persona = "You are a Navy Document Analyst. Your task is to answer the user's question using ONLY the Context provided below. If the answer is not present in the Context, return ONLY this exact phrase: 'I cannot find that information in the provided documents.' If no context is provided, answer generally."
901
-
902
- # Start the message payload with the system persona
903
- messages_payload = [{"role": "system", "content": system_persona}]
904
-
905
- # --- MEMORY LOGIC: SLIDING WINDOW ---
906
- # Get the last N messages (e.g., 6 total: 3 user + 3 assistant) for memory.
907
- # We start from -7 because we need to exclude the current prompt (already added)
908
- # and we want pairs of messages (user/assistant).
909
- history_depth = 8 # 4 full exchanges (8 messages) + current
910
- recent_history = st.session_state.messages[-(history_depth+1):-1]
911
-
912
- # Add history to payload
913
- messages_payload.extend(recent_history)
914
-
915
- # 3. Handle RAG & Current Prompt Augmentation
916
- final_user_content = prompt
917
- retrieved_docs = [] # Initialize for the context display later
918
-
919
- if use_rag:
920
- with st.spinner("🧠 Searching Knowledge Base..."):
921
- # Retrieve Docs
922
- retrieved_docs = rag_engine.search_knowledge_base(
923
- prompt,
924
- st.session_state.username
925
- )
926
-
927
- # Format Context
928
- context_text = ""
929
- if retrieved_docs:
930
- for doc in retrieved_docs:
931
- score = doc.metadata.get('relevance_score', 'N/A')
932
- src = os.path.basename(doc.metadata.get('source', 'Unknown'))
933
- context_text += f"---\nSOURCE: {src} (Rel: {score})\nTEXT: {doc.page_content}\n"
934
-
935
- # Augment the FINAL prompt with RAG context
936
- final_user_content = (
937
- f"User Question: {prompt}\n\n"
938
- f"Relevant Context:\n{context_text}\n\n"
939
- "Answer the question using the context provided."
940
- )
941
-
942
- # 4. Add the final (potentially augmented) user message to payload
943
- messages_payload.append({"role": "user", "content": final_user_content})
944
-
945
- # 5. Generate Response and Display
946
- with st.chat_message("assistant"):
947
- with st.spinner(f"Thinking with {selected_model_name}..."):
948
- # Determine model ID and max_len (assuming these are defined globally)
949
- max_len = 2000 # Example max length
950
- model_id = "" # To be mapped
951
-
952
- # --- MODEL MAPPING LOGIC (Use your existing global logic) ---
953
- ollama_map = {
954
- "Granite 4 (IBM)": "granite4:latest",
955
- "Llama 3.2 (Meta)": "llama3.2:latest",
956
- "Gemma 3 (Google)": "gemma3:latest"
957
- }
958
- for key, val in ollama_map.items():
959
- if key in selected_model_name:
960
- model_id = val
961
- break
962
-
963
- if not model_id and "gpt" in selected_model_name.lower():
964
- # If it's the GPT model choice
965
- response, usage = query_openai_model(messages_payload, max_len)
966
- elif model_id:
967
- # If it's the local Ollama model
968
- response, usage = query_local_model(messages_payload, max_len, model_id)
969
- else:
970
- response, usage = "Error: Could not determine model to use.", None
971
-
972
- st.markdown(response)
973
-
974
- # 6. Final Steps: Save Assistant Response and Update Metrics
975
- st.session_state.messages.append({"role": "assistant", "content": response})
976
-
977
- if usage:
978
- m_name = "Granite" if "Granite" in selected_model_name else "GPT-4o"
979
- tracker.log_usage(m_name, usage["input"], usage["output"])
980
- # Assuming update_sidebar_metrics() is defined globally
981
- update_sidebar_metrics()
982
-
983
- # 7. Display Context Used (if RAG was enabled)
984
- if use_rag and retrieved_docs:
985
- with st.expander("📚 View Context Used"):
986
- for i, doc in enumerate(retrieved_docs):
987
- score = doc.metadata.get('relevance_score', 'N/A')
988
- src = os.path.basename(doc.metadata.get('source', 'Unknown'))
989
- st.caption(f"Rank {i+1} (Source: {src}, Rel: {score})")
990
- st.text(doc.page_content)
991
- st.divider()
992
 
993
  # --- TAB 3: PROMPT ARCHITECT ---
994
  with tab3:
@@ -1077,7 +513,6 @@ with tab4:
1077
  with c1:
1078
  st.text(f"📄 {doc['filename']}")
1079
  with c2:
1080
- # FIX: Show strategy
1081
  st.caption(f"⚙️ {doc.get('strategy', 'Unknown')}")
1082
  with c3:
1083
  st.caption(f"{doc['chunks']}")
 
108
  model_choice = st.radio(
109
  "Choose your Intelligence:",
110
  model_options,
111
+ captions=model_captions,
112
+ key="model_selector_radio" # Unique Key Added for Safety
113
  )
114
  st.info(f"Connected to: **{model_choice}**")
115
 
 
141
  url = API_URL_ROOT + "/generate"
142
 
143
  # --- FLATTEN MESSAGE HISTORY ---
 
 
 
144
  formatted_history = ""
145
  system_persona = "You are a helpful assistant." # Default
146
 
 
156
  formatted_history += "Assistant: "
157
 
158
  payload = {
159
+ "text": formatted_history,
160
  "persona": system_persona,
161
  "max_tokens": max_tokens,
162
  "model": model_name
 
206
 
207
  def ask_ai(user_prompt, system_persona, max_tokens):
208
  # 1. Standardize Input: Convert the strings into the Message List format
 
209
  messages_payload = [
210
  {"role": "system", "content": system_persona},
211
  {"role": "user", "content": user_prompt}
 
213
 
214
  # 2. Routing Logic
215
  if "GPT-4o" in model_choice:
 
216
  return query_openai_model(messages_payload, max_tokens)
217
  else:
 
218
  technical_name = model_map[model_choice]
 
219
  return query_local_model(messages_payload, max_tokens, technical_name)
220
 
221
  # --- MAIN UI ---
 
277
  st.session_state.email_draft = reply
278
 
279
  if usage:
 
280
  if "GPT-4o" in model_choice:
281
  m_name = "GPT-4o"
282
  else:
 
283
  m_name = model_choice.split(" ")[0]
 
284
  tracker.log_usage(m_name, usage["input"], usage["output"])
285
+ update_sidebar_metrics()
286
 
287
  if st.session_state.email_draft:
288
  st.subheader("Draft Result")
 
297
  st.session_state.messages = []
298
 
299
  # --- CONTROLS AND METRICS ---
 
300
  c1, c2, c3 = st.columns([2, 1, 1])
301
  with c1:
302
+ # Use the global model_choice
303
  selected_model_name = st.session_state.get('model_choice', 'Granite 4 (IBM)')
304
  with c2:
305
  use_rag = st.toggle("🔌 Enable Knowledge Base", value=False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
306
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
307
  with c3:
308
  # --- NEW FEATURE: DOWNLOAD CHAT ---
 
309
  chat_log = ""
310
  for msg in st.session_state.messages:
311
  role = "USER" if msg['role'] == 'user' else "ASSISTANT"
312
  chat_log += f"[{role}]: {msg['content']}\n\n"
313
 
 
314
  if chat_log:
315
  st.download_button(
316
  label="💾 Save Chat",
 
327
  with st.chat_message(message["role"]):
328
  st.markdown(message["content"])
329
 
330
+ # --- CHAT INPUT HANDLING ---
331
  if prompt := st.chat_input("Ask a question..."):
332
  # 1. Display User Message and save to history
333
  st.session_state.messages.append({"role": "user", "content": prompt})
 
378
  recent_history = st.session_state.messages[-(history_depth+1):-1]
379
  messages_payload.extend(recent_history)
380
 
381
+ # Add the final (potentially augmented) user message to payload
382
  messages_payload.append({"role": "user", "content": final_user_content})
383
 
384
  # 5. Generate Response
385
  with st.chat_message("assistant"):
386
  with st.spinner(f"Thinking with {selected_model_name}..."):
387
+ # Determine model ID
 
 
 
 
388
  model_id = ""
389
  ollama_map = {
390
  "Granite 4 (IBM)": "granite4:latest",
 
425
  st.caption(f"Rank {i+1} (Source: {src}, Rel: {score})")
426
  st.text(doc.page_content)
427
  st.divider()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
428
 
429
  # --- TAB 3: PROMPT ARCHITECT ---
430
  with tab3:
 
513
  with c1:
514
  st.text(f"📄 {doc['filename']}")
515
  with c2:
 
516
  st.caption(f"⚙️ {doc.get('strategy', 'Unknown')}")
517
  with c3:
518
  st.caption(f"{doc['chunks']}")