import streamlit as st
import requests
import os
import unicodedata
import resources  # Assuming this file exists in your repo
import tracker
import rag_engine  # Now safe to import at top level (lazy loading enabled)
from openai import OpenAI
from datetime import datetime

# --- CONFIGURATION ---
st.set_page_config(page_title="Navy AI Toolkit", page_icon="⚓", layout="wide")

# 1. SETUP CREDENTIALS
API_URL_ROOT = os.getenv("API_URL")       # For Ollama models
OPENAI_KEY = os.getenv("OPENAI_API_KEY")  # For GPT-4o

# --- INITIALIZATION ---
if "roles" not in st.session_state:
    st.session_state.roles = []

# --- LOGIN / REGISTER LOGIC ---
if "authentication_status" not in st.session_state or st.session_state["authentication_status"] is None:
    # If not logged in, show tabs
    login_tab, register_tab = st.tabs(["🔑 Login", "📝 Register"])

    with login_tab:
        is_logged_in = tracker.check_login()
        # FIX: Trigger user DB download ONLY on fresh login
        if is_logged_in:
            tracker.download_user_db(st.session_state.username)
            st.rerun()  # Refresh to show the app

    with register_tab:
        st.header("Create Account")
        with st.form("reg_form"):
            new_user = st.text_input("Username")
            new_name = st.text_input("Display Name")
            new_email = st.text_input("Email")
            new_pwd = st.text_input("Password", type="password")
            invite = st.text_input("Invitation Passcode")
            if st.form_submit_button("Register"):
                success, msg = tracker.register_user(new_email, new_user, new_name, new_pwd, invite)
                if success:
                    st.success(msg)
                else:
                    st.error(msg)

# Stop execution if not logged in
if not st.session_state.get("authentication_status"):
    st.stop()

# --- GLOBAL PLACEHOLDERS ---
metric_placeholder = None
admin_metric_placeholder = None

# --- SIDEBAR (CONSOLIDATED) ---
with st.sidebar:
    st.header("👤 User Profile")
    st.write(f"Welcome, **{st.session_state.name}**")

    st.header("📊 Usage Tracker")
    metric_placeholder = st.empty()

    # Admin Tools
    if "admin" in st.session_state.roles:
        st.divider()
        st.header("🛡️ Admin Tools")
        admin_metric_placeholder = st.empty()

        # FIX: Point to the correct persistence path
        log_path = tracker.get_log_path()
        if log_path.exists():
            with open(log_path, "r") as f:
                log_data = f.read()
            st.download_button(
                label="📥 Download Usage Logs",
                data=log_data,
                file_name=f"usage_log_{datetime.now().strftime('%Y-%m-%d')}.json",
                mime="application/json"
            )
        else:
            st.warning("No logs found yet.")

    # Logout
    if "authenticator" in st.session_state:
        st.session_state.authenticator.logout(location='sidebar')
    st.divider()

    # --- MODEL SELECTOR ---
    st.header("🧠 Model Selector")
    model_map = {
        "Granite 4 (IBM)": "granite4:latest",
        "Llama 3.2 (Meta)": "llama3.2:latest",
        "Gemma 3 (Google)": "gemma3:latest"
    }
    model_options = list(model_map.keys())
    model_captions = ["Slower for now, but free and private" for _ in model_options]
    if "admin" in st.session_state.roles:
        model_options.append("GPT-4o (Omni)")
        model_captions.append("Fast, smart, sends data to OpenAI")
    model_choice = st.radio(
        "Choose your Intelligence:",
        model_options,
        captions=model_captions
    )
    st.info(f"Connected to: **{model_choice}**")
    st.divider()

    st.header("⚙️ Controls")
    max_len = st.slider("Max Response Length (Tokens)", 100, 2000, 500)

# --- HELPER FUNCTIONS ---
def update_sidebar_metrics():
    """Refreshes the global placeholders defined in the sidebar."""
    if metric_placeholder is None:
        return
    stats = tracker.get_daily_stats()
    user_stats = stats["users"].get(st.session_state.username, {"input": 0, "output": 0})
    metric_placeholder.metric("My Tokens Today", user_stats["input"] + user_stats["output"])
    if "admin" in st.session_state.roles and admin_metric_placeholder is not None:
        admin_metric_placeholder.metric("Team Total Today", stats["total_tokens"])
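# The metrics above assume tracker.get_daily_stats() returns a dict shaped
# roughly like the sketch below (inferred from how it is read here; the
# authoritative schema lives in tracker.py and may differ):
#
#   {
#       "total_tokens": 12345,
#       "users": {
#           "jdoe": {"input": 8000, "output": 4345},
#       },
#   }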
# Call metrics once on load
update_sidebar_metrics()

def query_local_model(messages, max_tokens, model_name):
    if not API_URL_ROOT:
        return "Error: API_URL not set.", None
    url = API_URL_ROOT + "/generate"

    # --- FLATTEN MESSAGE HISTORY ---
    # Since the backend expects a single string ("text"), we format the history here.
    # The system persona is extracted separately to pass in the 'persona' field.
    formatted_history = ""
    system_persona = "You are a helpful assistant."  # Default
    for msg in messages:
        if msg['role'] == 'system':
            system_persona = msg['content']
        elif msg['role'] == 'user':
            formatted_history += f"User: {msg['content']}\n"
        elif msg['role'] == 'assistant':
            formatted_history += f"Assistant: {msg['content']}\n"

    # Append "Assistant: " at the end to cue the model
    formatted_history += "Assistant: "

    payload = {
        "text": formatted_history,  # <--- History goes here
        "persona": system_persona,
        "max_tokens": max_tokens,
        "model": model_name
    }
    try:
        response = requests.post(url, json=payload, timeout=300)
        if response.status_code == 200:
            response_data = response.json()
            ans = response_data.get("response", "")
            usage = response_data.get("usage", {"input": 0, "output": 0})
            return ans, usage
        return f"Error {response.status_code}: {response.text}", None
    except Exception as e:
        return f"Connection Error: {e}", None
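# For reference, the wire format the /generate endpoint is assumed to speak
# (field names are taken from the code above; the example values are purely
# illustrative):
#
#   POST {API_URL}/generate
#   -> {"text": "User: hi\nAssistant: ", "persona": "You are a helpful assistant.",
#       "max_tokens": 500, "model": "granite4:latest"}
#   <- {"response": "Hello!", "usage": {"input": 12, "output": 3}}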
def query_openai_model(messages, max_tokens):
    if not OPENAI_KEY:
        return "Error: OPENAI_API_KEY not set.", None
    client = OpenAI(api_key=OPENAI_KEY)
    try:
        response = client.chat.completions.create(
            model="gpt-4o",
            max_tokens=max_tokens,
            messages=messages,
            temperature=0.3
        )
        usage_obj = response.usage
        usage_dict = {"input": usage_obj.prompt_tokens, "output": usage_obj.completion_tokens}
        return response.choices[0].message.content, usage_dict
    except Exception as e:
        return f"OpenAI Error: {e}", None

def clean_text(text):
    if not text:
        return ""
    text = unicodedata.normalize('NFKC', text)
    replacements = {'“': '"', '”': '"', '‘': "'", '’': "'",
                    '–': '-', '—': '-', '…': '...', '\u00a0': ' '}
    for old, new in replacements.items():
        text = text.replace(old, new)
    return text.strip()

def ask_ai(user_prompt, system_persona, max_tokens):
    # FIX: Build a proper message payload and route GPT-4o to OpenAI.
    # (The original routed every choice to query_local_model with
    # mismatched arguments, which would raise a TypeError.)
    messages = [
        {"role": "system", "content": system_persona},
        {"role": "user", "content": user_prompt}
    ]
    if "GPT-4o" in model_choice:
        return query_openai_model(messages, max_tokens)
    technical_name = model_map[model_choice]
    return query_local_model(messages, max_tokens, technical_name)
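# Illustrative call (values made up): route a one-shot prompt through the
# currently selected model; both backends return (text, usage_dict_or_None).
#
#   reply, usage = ask_ai("Summarize these notes...", "You are a concise writer.", 500)
#   if usage:  # e.g. {"input": 212, "output": 148}
#       tracker.log_usage(model_choice, usage["input"], usage["output"])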
# --- MAIN UI ---
st.title("AI Toolkit")
tab1, tab2, tab3, tab4 = st.tabs(["📧 Email Builder", "💬 Chat Playground", "🛠️ Prompt Architect", "📚 Knowledge Base"])

# --- TAB 1: EMAIL BUILDER ---
with tab1:
    st.header("Structured Email Generator")
    if "email_draft" not in st.session_state:
        st.session_state.email_draft = ""

    st.subheader("1. Define the Voice")
    style_mode = st.radio("How should the AI write?", ["Use a Preset Persona", "Mimic My Style"], horizontal=True)

    selected_persona_instruction = ""
    if style_mode == "Use a Preset Persona":
        persona_name = st.selectbox("Select a Persona", list(resources.TONE_LIBRARY.keys()))
        selected_persona_instruction = resources.TONE_LIBRARY[persona_name]
        st.info(f"**System Instruction:** {selected_persona_instruction}")
    else:
        st.info("Upload 1-3 text files of your previous emails.")
        uploaded_style_files = st.file_uploader("Upload Samples (.txt)", type=["txt"], accept_multiple_files=True)
        if uploaded_style_files:
            style_context = ""
            for uploaded_file in uploaded_style_files:
                string_data = uploaded_file.read().decode("utf-8")
                style_context += f"---\n{string_data}\n---\n"
            selected_persona_instruction = f"Analyze these examples and mimic the style:\n{style_context}"

    st.divider()
    st.subheader("2. Details")
    c1, c2 = st.columns(2)
    with c1:
        recipient = st.text_input("Recipient")
    with c2:
        topic = st.text_input("Topic")

    st.caption("Content Source")
    input_method = st.toggle("Upload notes file?")
    raw_notes = ""
    if input_method:
        notes_file = st.file_uploader("Upload Notes (.txt)", type=["txt"])
        if notes_file:
            raw_notes = notes_file.read().decode("utf-8")
    else:
        raw_notes = st.text_area("Paste notes:", height=150)

    # Context bar (rough heuristic: ~4 characters per token for English text)
    est_tokens = len(raw_notes) / 4
    st.progress(min(est_tokens / 128000, 1.0), text=f"Context: {int(est_tokens)} tokens")

    if st.button("Draft Email", type="primary"):
        if not raw_notes:
            st.warning("Please provide notes.")
        else:
            clean_notes = clean_text(raw_notes)
            with st.spinner(f"Drafting with {model_choice}..."):
                prompt = f"TASK: Write email.\nTO: {recipient}\nTOPIC: {topic}\nSTYLE: {selected_persona_instruction}\nDATA: {clean_notes}"
                reply, usage = ask_ai(prompt, "You are an expert ghostwriter.", max_len)
                st.session_state.email_draft = reply
                if usage:
                    # FIX: Log the actual model used (the original collapsed
                    # every non-Granite choice to "GPT-4o")
                    tracker.log_usage(model_choice, usage["input"], usage["output"])
                    update_sidebar_metrics()  # Force update

    if st.session_state.email_draft:
        st.subheader("Draft Result")
        st.text_area("Copy your email:", value=st.session_state.email_draft, height=300)

# --- TAB 2: CHAT PLAYGROUND ---
with tab2:
    st.header("Choose Your Model and Start a Discussion")

    # --- INITIALIZE CHAT MEMORY (MUST BE DONE FIRST) ---
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # --- CONTROLS AND METRICS ---
    # The controls are kept outside the chat loop.
    c1, c2, c3 = st.columns([2, 1, 1])
    with c1:
        # FIX: Use the global model_choice from the sidebar directly
        # (the radio never wrote to st.session_state['model_choice'])
        selected_model_name = model_choice
    with c2:
        use_rag = st.toggle("🔌 Enable Knowledge Base", value=False)
        # The token progress bar is handled inside the prompt logic based on input length
    with c3:
        # --- NEW FEATURE: DOWNLOAD CHAT ---
        # Convert history to a readable string
        chat_log = ""
        for msg in st.session_state.messages:
            role = "USER" if msg['role'] == 'user' else "ASSISTANT"
            chat_log += f"[{role}]: {msg['content']}\n\n"
        # Only show button if there is history to save
        if chat_log:
            st.download_button(
                label="💾 Save Chat",
                data=chat_log,
                file_name="mission_log.txt",
                mime="text/plain",
                help="Download the current conversation history."
            )

    st.divider()

    # --- DISPLAY CONVERSATION HISTORY ---
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # --- CHAT INPUT HANDLING (Replaces st.text_input and st.button) ---
    if prompt := st.chat_input("Ask a question..."):
        # 1. Display user message and save to history
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)

        # 2. Initialize the payload with the system persona
        system_persona = (
            "You are a Navy Document Analyst. Your task is to answer the user's question "
            "using ONLY the Context provided below. If the answer is not present in the "
            "Context, return ONLY this exact phrase: 'I cannot find that information in "
            "the provided documents.' If no context is provided, answer generally."
        )
        messages_payload = [{"role": "system", "content": system_persona}]

        # --- MEMORY LOGIC: SLIDING WINDOW ---
        # Take the last `history_depth` messages before the current prompt
        # (which was already appended above), i.e. slice [-(history_depth+1):-1].
        history_depth = 8  # 4 full user/assistant exchanges
        recent_history = st.session_state.messages[-(history_depth + 1):-1]
        messages_payload.extend(recent_history)

        # 3. Handle RAG & current prompt augmentation
        final_user_content = prompt
        retrieved_docs = []  # Initialize for the context display later
        if use_rag:
            with st.spinner("🧠 Searching Knowledge Base..."):
                # Retrieve docs
                retrieved_docs = rag_engine.search_knowledge_base(
                    prompt, st.session_state.username
                )
                # Format context
                context_text = ""
                if retrieved_docs:
                    for doc in retrieved_docs:
                        score = doc.metadata.get('relevance_score', 'N/A')
                        src = os.path.basename(doc.metadata.get('source', 'Unknown'))
                        context_text += f"---\nSOURCE: {src} (Rel: {score})\nTEXT: {doc.page_content}\n"
                # Augment the FINAL prompt with RAG context
                final_user_content = (
                    f"User Question: {prompt}\n\n"
                    f"Relevant Context:\n{context_text}\n\n"
                    "Answer the question using the context provided."
                )
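        # NOTE (assumption): search_knowledge_base() is treated here as returning
        # LangChain-style Document objects, i.e. each result exposes
        #   doc.page_content  -> str, the chunk text
        #   doc.metadata      -> dict with at least 'source' and 'relevance_score'
        # If rag_engine returns a different shape, adjust the formatting above.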
        # 4. Add the final (potentially augmented) user message to the payload
        messages_payload.append({"role": "user", "content": final_user_content})

        # 5. Generate response and display
        with st.chat_message("assistant"):
            with st.spinner(f"Thinking with {selected_model_name}..."):
                # FIX: Use the global model_map and the max_len slider from the
                # sidebar instead of a duplicated map and a hardcoded token limit.
                model_id = model_map.get(selected_model_name, "")

                if not model_id and "gpt" in selected_model_name.lower():
                    # GPT model choice
                    response, usage = query_openai_model(messages_payload, max_len)
                elif model_id:
                    # Local Ollama model
                    response, usage = query_local_model(messages_payload, max_len, model_id)
                else:
                    response, usage = "Error: Could not determine model to use.", None

                st.markdown(response)

        # 6. Final steps: save assistant response and update metrics
        st.session_state.messages.append({"role": "assistant", "content": response})
        if usage:
            # FIX: Log the actual model used
            tracker.log_usage(selected_model_name, usage["input"], usage["output"])
            update_sidebar_metrics()

        # 7. Display context used (if RAG was enabled)
        if use_rag and retrieved_docs:
            with st.expander("📚 View Context Used"):
                for i, doc in enumerate(retrieved_docs):
                    score = doc.metadata.get('relevance_score', 'N/A')
                    src = os.path.basename(doc.metadata.get('source', 'Unknown'))
                    st.caption(f"Rank {i+1} (Source: {src}, Rel: {score})")
                    st.text(doc.page_content)
                    st.divider()

# --- TAB 3: PROMPT ARCHITECT ---
with tab3:
    st.header("🛠️ Mega-Prompt Factory")
    st.info("Build standard templates for NIPRGPT.")
    c1, c2 = st.columns([1, 1])
    with c1:
        st.subheader("1. Parameters")
        p = st.text_area("Persona", placeholder="Act as...", height=100)
        c = st.text_area("Context", placeholder="Background...", height=100)
        t = st.text_area("Task", placeholder="Action...", height=100)
        v = st.text_input("Placeholder Name", value="PASTE_DATA_HERE")
    with c2:
        st.subheader("2. Result")
        final = f"### ROLE\n{p}\n### CONTEXT\n{c}\n### TASK\n{t}\n### INPUT DATA\n\"\"\"\n[{v}]\n\"\"\""
        st.code(final, language="markdown")
        st.download_button("💾 Download .txt", final, "template.txt")
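# For illustration, with Persona="Act as a career counselor", Context="Sailor
# preparing for a selection board", Task="Draft talking points", and the default
# placeholder name, the rendered template looks like (sample values made up):
#
#   ### ROLE
#   Act as a career counselor
#   ### CONTEXT
#   Sailor preparing for a selection board
#   ### TASK
#   Draft talking points
#   ### INPUT DATA
#   """
#   [PASTE_DATA_HERE]
#   """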
# --- TAB 4: KNOWLEDGE BASE ---
with tab4:
    st.header("🧠 Personal Knowledge Base")
    st.info(f"Managing knowledge for: **{st.session_state.username}**")

    # We no longer check 'is_admin' for the whole tab
    kb_tab1, kb_tab2 = st.tabs(["📤 Add Documents", "🗂️ Manage Database"])

    # --- SUB-TAB 1: UPLOAD (Unlocked for Everyone) ---
    with kb_tab1:
        st.subheader("Ingest New Knowledge")
        uploaded_file = st.file_uploader("Upload Instructions, Manuals, or Logs", type=["pdf", "docx", "txt", "md"])
        col1, col2 = st.columns([1, 2])
        with col1:
            chunk_strategy = st.selectbox(
                "Chunking Strategy",
                ["paragraph", "token", "page"],
                help="Paragraph: Manuals. Token: Dense text. Page: Forms."
            )

        if uploaded_file and st.button("Process & Add"):
            with st.spinner("Analyzing and Indexing..."):
                # 1. Save temp file
                temp_path = rag_engine.save_uploaded_file(uploaded_file)
                # 2. Process into the USER'S specific DB (st.session_state.username)
                success, msg = rag_engine.process_and_add_document(
                    temp_path, st.session_state.username, chunk_strategy
                )
                if success:
                    st.success(msg)
                    st.rerun()
                else:
                    st.error(f"Failed: {msg}")

        st.divider()
        st.subheader("🔎 Quick Test")
        test_query = st.text_input("Ask your brain something...")
        if test_query:
            results = rag_engine.search_knowledge_base(test_query, st.session_state.username)
            if not results:
                st.warning("No matches found.")
            for i, doc in enumerate(results):
                src_name = os.path.basename(doc.metadata.get('source', '?'))
                score = doc.metadata.get('relevance_score', 'N/A')
                with st.expander(f"Match {i+1}: {src_name} (Score: {score})"):
                    st.write(doc.page_content)

    # --- SUB-TAB 2: MANAGE (Unlocked for Everyone) ---
    with kb_tab2:
        st.subheader("🗄️ Database Inventory")
        docs = rag_engine.list_documents(st.session_state.username)
        if not docs:
            st.info("Your Knowledge Base is empty.")
        else:
            st.markdown(f"**Total Documents:** {len(docs)}")
            for doc in docs:
                c1, c2, c3, c4 = st.columns([3, 2, 1, 1])
                with c1:
                    st.text(f"📄 {doc['filename']}")
                with c2:
                    # FIX: Show strategy
                    st.caption(f"⚙️ {doc.get('strategy', 'Unknown')}")
                with c3:
                    st.caption(f"{doc['chunks']} chunks")
                with c4:
                    if st.button("🗑️", key=doc['source'], help="Delete Document"):
                        with st.spinner("Deleting..."):
                            success, msg = rag_engine.delete_document(st.session_state.username, doc['source'])
                            if success:
                                st.success(msg)
                                st.rerun()
                            else:
                                st.error(msg)

        st.divider()
        with st.expander("🚨 Danger Zone"):
            # Allow ANY user to reset their OWN database
            if st.button("☢️ RESET MY DATABASE", type="primary"):
                success, msg = rag_engine.reset_knowledge_base(st.session_state.username)
                if success:
                    st.success(msg)
                    st.rerun()
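# To run locally (standard Streamlit invocation; the filename below is an
# assumption — substitute whatever this module is actually called):
#
#   export API_URL="http://localhost:8000"   # backend exposing POST /generate
#   export OPENAI_API_KEY="sk-..."           # only needed for the GPT-4o option
#   streamlit run app.py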