Update src/app.py
Fixed an issue that was unintentionally forcing RAG mode all the time.
- src/app.py +553 -1
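The gist of the change is that knowledge-base retrieval is now opt-in: the RAG branch in the chat handler only runs when the "Enable Knowledge Base" toggle is switched on, and the default assistant persona and prompt are used otherwise. A minimal sketch of that gating pattern, using names that appear in the diff below (rag_engine is the app's own module, so this is illustrative rather than standalone):

    import streamlit as st
    import rag_engine  # the app's own retrieval module (assumed to provide search_knowledge_base)

    use_rag = st.toggle("Enable Knowledge Base", value=False)  # defaults to off

    if prompt := st.chat_input("Ask a question..."):
        # Standard mode unless the user explicitly enabled RAG
        system_persona = "You are a helpful AI assistant."
        final_user_content = prompt
        if use_rag:
            docs = rag_engine.search_knowledge_base(prompt, st.session_state.username)
            if docs:
                system_persona = "Answer using ONLY the provided context."  # strict RAG persona
                context = "\n".join(d.page_content for d in docs)
                final_user_content = f"User Question: {prompt}\n\nRelevant Context:\n{context}"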
src/app.py
CHANGED
@@ -340,7 +340,559 @@ with tab2:
         with st.chat_message(message["role"]):
             st.markdown(message["content"])

-    # --- CHAT INPUT HANDLING
+    # --- CHAT INPUT HANDLING import streamlit as st
+import requests
+import os
+import unicodedata
+import resources # Assuming this file exists in your repo
+import tracker
+import rag_engine # Now safe to import at top level (lazy loading enabled)
+from openai import OpenAI
+from datetime import datetime
+
+# --- CONFIGURATION ---
+st.set_page_config(page_title="Navy AI Toolkit", page_icon="β", layout="wide")
+
+# 1. SETUP CREDENTIALS
+API_URL_ROOT = os.getenv("API_URL") # For Ollama models
+OPENAI_KEY = os.getenv("OPENAI_API_KEY") # For GPT-4o
+
+# --- INITIALIZATION ---
+if "roles" not in st.session_state:
+    st.session_state.roles = []
+
+# --- LOGIN / REGISTER LOGIC ---
+if "authentication_status" not in st.session_state or st.session_state["authentication_status"] is None:
+    # If not logged in, show tabs
+    login_tab, register_tab = st.tabs(["π Login", "π Register"])
+
+    with login_tab:
+        is_logged_in = tracker.check_login()
+        # FIX: Trigger User DB Download ONLY on fresh login
+        if is_logged_in:
+            tracker.download_user_db(st.session_state.username)
+            st.rerun() # Refresh to show the app
+
+    with register_tab:
+        st.header("Create Account")
+        with st.form("reg_form"):
+            new_user = st.text_input("Username")
+            new_name = st.text_input("Display Name")
+            new_email = st.text_input("Email")
+            new_pwd = st.text_input("Password", type="password")
+            invite = st.text_input("Invitation Passcode")
+
+            if st.form_submit_button("Register"):
+                success, msg = tracker.register_user(new_email, new_user, new_name, new_pwd, invite)
+                if success:
+                    st.success(msg)
+                else:
+                    st.error(msg)
+
+# Stop execution if not logged in
+if not st.session_state.get("authentication_status"):
+    st.stop()
+
+# --- GLOBAL PLACEHOLDERS ---
+metric_placeholder = None
+admin_metric_placeholder = None
+
+# --- SIDEBAR (CONSOLIDATED) ---
+with st.sidebar:
+    st.header("π€ User Profile")
+    st.write(f"Welcome, **{st.session_state.name}**")
+
+    st.header("π Usage Tracker")
+    metric_placeholder = st.empty()
+
+    # Admin Tools
+    if "admin" in st.session_state.roles:
+        st.divider()
+        st.header("π‘οΈ Admin Tools")
+        admin_metric_placeholder = st.empty()
+
+        # FIX: Point to the correct persistence path
+        log_path = tracker.get_log_path()
+        if log_path.exists():
+            with open(log_path, "r") as f:
+                log_data = f.read()
+            st.download_button(
+                label="π₯ Download Usage Logs",
+                data=log_data,
+                file_name=f"usage_log_{datetime.now().strftime('%Y-%m-%d')}.json",
+                mime="application/json"
+            )
+        else:
+            st.warning("No logs found yet.")
+
+    # Logout
+    if "authenticator" in st.session_state:
+        st.session_state.authenticator.logout(location='sidebar')
+
+    st.divider()
+
+    # --- MODEL SELECTOR ---
+    st.header("π§ Model Selector")
+
+    model_map = {
+        "Granite 4 (IBM)": "granite4:latest",
+        "Llama 3.2 (Meta)": "llama3.2:latest",
+        "Gemma 3 (Google)": "gemma3:latest"
+    }
+
+    model_options = list(model_map.keys())
+    model_captions = ["Slower for now, but free and private" for _ in model_options]
+
+    if "admin" in st.session_state.roles:
+        model_options.append("GPT-4o (Omni)")
+        model_captions.append("Fast, smart, sends data to OpenAI")
+
+    model_choice = st.radio(
+        "Choose your Intelligence:",
+        model_options,
+        captions=model_captions
+    )
+    st.info(f"Connected to: **{model_choice}**")
+
+    st.divider()
+    st.header("βοΈ Controls")
+    max_len = st.slider("Max Response Length (Tokens)", 100, 2000, 500)
+
+# --- HELPER FUNCTIONS ---
+def update_sidebar_metrics():
+    """Refreshes the global placeholders defined in the sidebar."""
+    if metric_placeholder is None:
+        return
+
+    stats = tracker.get_daily_stats()
+    user_stats = stats["users"].get(st.session_state.username, {"input":0, "output":0})
+
+    metric_placeholder.metric("My Tokens Today", user_stats["input"] + user_stats["output"])
+
+    if "admin" in st.session_state.roles and admin_metric_placeholder is not None:
+        admin_metric_placeholder.metric("Team Total Today", stats["total_tokens"])
+
+# Call metrics once on load
+update_sidebar_metrics()
+
+def query_local_model(messages, max_tokens, model_name):
+    if not API_URL_ROOT:
+        return "Error: API_URL not set.", None
+
+    url = API_URL_ROOT + "/generate"
+
+    # --- FLATTEN MESSAGE HISTORY ---
+    # Since the backend expects a single string ("text"), we format the history here.
+    # We extract the system persona separately to pass to the 'persona' field.
+
+    formatted_history = ""
+    system_persona = "You are a helpful assistant." # Default
+
+    for msg in messages:
+        if msg['role'] == 'system':
+            system_persona = msg['content']
+        elif msg['role'] == 'user':
+            formatted_history += f"User: {msg['content']}\n"
+        elif msg['role'] == 'assistant':
+            formatted_history += f"Assistant: {msg['content']}\n"
+
+    # Append the "Assistant:" prompt at the end to cue the model
+    formatted_history += "Assistant: "
+
+    payload = {
+        "text": formatted_history, # <--- History goes here
+        "persona": system_persona,
+        "max_tokens": max_tokens,
+        "model": model_name
+    }
+
+    try:
+        response = requests.post(url, json=payload, timeout=300)
+
+        if response.status_code == 200:
+            response_data = response.json()
+            ans = response_data.get("response", "")
+            usage = response_data.get("usage", {"input":0, "output":0})
+            return ans, usage
+
+        return f"Error {response.status_code}: {response.text}", None
+
+    except Exception as e:
+        return f"Connection Error: {e}", None
+
+def query_openai_model(messages, max_tokens):
+    if not OPENAI_KEY:
+        return "Error: OPENAI_API_KEY not set.", None
+
+    client = OpenAI(api_key=OPENAI_KEY)
+
+    try:
+        response = client.chat.completions.create(
+            model="gpt-4o",
+            max_tokens=max_tokens,
+            messages=messages,
+            temperature=0.3
+        )
+        usage_obj = response.usage
+        usage_dict = {"input": usage_obj.prompt_tokens, "output": usage_obj.completion_tokens}
+        return response.choices[0].message.content, usage_dict
+
+    except Exception as e:
+        return f"OpenAI Error: {e}", None
+
+def clean_text(text):
+    if not text: return ""
+    text = unicodedata.normalize('NFKC', text)
+    replacements = {'β': '"', 'β': '"', 'β': "'", 'β': "'", 'β': '-', 'β': '-', 'β¦': '...', '\u00a0': ' '}
+    for old, new in replacements.items():
+        text = text.replace(old, new)
+    return text.strip()
+
+def ask_ai(user_prompt, system_persona, max_tokens):
+    # 1. Standardize Input: Convert the strings into the Message List format
+    # This ensures compatibility with our new memory-aware backend functions
+    messages_payload = [
+        {"role": "system", "content": system_persona},
+        {"role": "user", "content": user_prompt}
+    ]
+
+    # 2. Routing Logic
+    if "GPT-4o" in model_choice:
+        # CORRECTED: Now calls the OpenAI function
+        return query_openai_model(messages_payload, max_tokens)
+    else:
+        # Lookup the technical name for Ollama
+        technical_name = model_map[model_choice]
+        # Calls the Local function
+        return query_local_model(messages_payload, max_tokens, technical_name)
+
+# --- MAIN UI ---
+st.title("AI Toolkit")
+tab1, tab2, tab3, tab4 = st.tabs(["π§ Email Builder", "π¬ Chat Playground", "π οΈ Prompt Architect", "π Knowledge Base"])
+
+# --- TAB 1: EMAIL BUILDER ---
+with tab1:
+    st.header("Structured Email Generator")
+    if "email_draft" not in st.session_state:
+        st.session_state.email_draft = ""
+
+    st.subheader("1. Define the Voice")
+    style_mode = st.radio("How should the AI write?", ["Use a Preset Persona", "Mimic My Style"], horizontal=True)
+
+    selected_persona_instruction = ""
+    if style_mode == "Use a Preset Persona":
+        persona_name = st.selectbox("Select a Persona", list(resources.TONE_LIBRARY.keys()))
+        selected_persona_instruction = resources.TONE_LIBRARY[persona_name]
+        st.info(f"**System Instruction:** {selected_persona_instruction}")
+    else:
+        st.info("Upload 1-3 text files of your previous emails.")
+        uploaded_style_files = st.file_uploader("Upload Samples (.txt)", type=["txt"], accept_multiple_files=True)
+        if uploaded_style_files:
+            style_context = ""
+            for uploaded_file in uploaded_style_files:
+                string_data = uploaded_file.read().decode("utf-8")
+                style_context += f"---\n{string_data}\n---\n"
+            selected_persona_instruction = f"Analyze these examples and mimic the style:\n{style_context}"
+
+    st.divider()
+    st.subheader("2. Details")
+    c1, c2 = st.columns(2)
+    with c1: recipient = st.text_input("Recipient")
+    with c2: topic = st.text_input("Topic")
+
+    st.caption("Content Source")
+    input_method = st.toggle("Upload notes file?")
+    raw_notes = ""
+    if input_method:
+        notes_file = st.file_uploader("Upload Notes (.txt)", type=["txt"])
+        if notes_file: raw_notes = notes_file.read().decode("utf-8")
+    else:
+        raw_notes = st.text_area("Paste notes:", height=150)
+
+    # Context Bar
+    est_tokens = len(raw_notes) / 4
+    st.progress(min(est_tokens / 128000, 1.0), text=f"Context: {int(est_tokens)} tokens")
+
+    if st.button("Draft Email", type="primary"):
+        if not raw_notes:
+            st.warning("Please provide notes.")
+        else:
+            clean_notes = clean_text(raw_notes)
+            with st.spinner(f"Drafting with {model_choice}..."):
+                prompt = f"TASK: Write email.\nTO: {recipient}\nTOPIC: {topic}\nSTYLE: {selected_persona_instruction}\nDATA: {clean_notes}"
+
+                reply, usage = ask_ai(prompt, "You are an expert ghostwriter.", max_len)
+                st.session_state.email_draft = reply
+
+                if usage:
+                    # 1. Determine a clean name for the log
+                    if "GPT-4o" in model_choice:
+                        m_name = "GPT-4o"
+                    else:
+                        # Use the first word of the model choice (e.g., "Llama", "Gemma", "Granite")
+                        m_name = model_choice.split(" ")[0]
+                    # 2. Log it
+                    tracker.log_usage(m_name, usage["input"], usage["output"])
+                    update_sidebar_metrics() # Force update
+
+    if st.session_state.email_draft:
+        st.subheader("Draft Result")
+        st.text_area("Copy your email:", value=st.session_state.email_draft, height=300)
+
+# --- TAB 2: CHAT PLAYGROUND ---
+with tab2:
+    st.header("Choose Your Model and Start a Discussion")
+
+    # --- INITIALIZE CHAT MEMORY (MUST BE DONE FIRST) ---
+    if "messages" not in st.session_state:
+        st.session_state.messages = []
+
+    # --- CONTROLS AND METRICS ---
+    # The controls are kept outside the chat loop.
+    c1, c2, c3 = st.columns([2, 1, 1])
+    with c1:
+        # Use the global model_choice from the sidebar/tab1 initialization
+        selected_model_name = st.session_state.get('model_choice', 'Granite 4 (IBM)')
+    with c2:
+        use_rag = st.toggle("π Enable Knowledge Base", value=False)
+        # The token progress bar will be handled inside the prompt logic based on input length
+
+    with c3:
+        # --- NEW FEATURE: DOWNLOAD CHAT ---
+        # Convert history to a readable string
+        chat_log = ""
+        for msg in st.session_state.messages:
+            role = "USER" if msg['role'] == 'user' else "ASSISTANT"
+            chat_log += f"[{role}]: {msg['content']}\n\n"
+
+        # Only show button if there is history to save
+        if chat_log:
+            st.download_button(
+                label="πΎ Save Chat",
+                data=chat_log,
+                file_name="mission_log.txt",
+                mime="text/plain",
+                help="Download the current conversation history."
+            )
+
+    st.divider()
+
+    # --- DISPLAY CONVERSATION HISTORY ---
+    for message in st.session_state.messages:
+        with st.chat_message(message["role"]):
+            st.markdown(message["content"])
+
+    # --- CHAT INPUT HANDLING ---
+    if prompt := st.chat_input("Ask a question..."):
+        # 1. Display User Message and save to history
+        st.session_state.messages.append({"role": "user", "content": prompt})
+        with st.chat_message("user"):
+            st.markdown(prompt)
+
+        # 2. Default Configuration (Standard AI Mode)
+        system_persona = "You are a helpful AI assistant. Answer the user's question to the best of your ability."
+        final_user_content = prompt
+        retrieved_docs = []
+
+        # 3. Handle RAG Logic (Only if enabled)
+        if use_rag:
+            with st.spinner("π§ Searching Knowledge Base..."):
+                retrieved_docs = rag_engine.search_knowledge_base(
+                    prompt,
+                    st.session_state.username
+                )
+
+            if retrieved_docs:
+                # RAG SUCCESS: Switch to Strict Navy Persona
+                system_persona = (
+                    "You are a Navy Document Analyst. Your task is to answer the user's question "
+                    "using ONLY the Context provided below. "
+                    "If the answer is not present in the Context, return ONLY this exact phrase: "
+                    "'I cannot find that information in the provided documents.'"
+                )
+
+                # Format Context
+                context_text = ""
+                for doc in retrieved_docs:
+                    score = doc.metadata.get('relevance_score', 'N/A')
+                    src = os.path.basename(doc.metadata.get('source', 'Unknown'))
+                    context_text += f"---\nSOURCE: {src} (Rel: {score})\nTEXT: {doc.page_content}\n"
+
+                # Augment User Prompt
+                final_user_content = (
+                    f"User Question: {prompt}\n\n"
+                    f"Relevant Context:\n{context_text}\n\n"
+                    "Answer the question using the context provided."
+                )
+
+        # 4. Construct Payload (Now using the CORRECT persona)
+        messages_payload = [{"role": "system", "content": system_persona}]
+
+        # --- MEMORY LOGIC: SLIDING WINDOW ---
+        history_depth = 8
+        recent_history = st.session_state.messages[-(history_depth+1):-1]
+        messages_payload.extend(recent_history)
+
+        # Add the final (potentially augmented) user message
+        messages_payload.append({"role": "user", "content": final_user_content})
+
+        # 5. Generate Response
+        with st.chat_message("assistant"):
+            with st.spinner(f"Thinking with {selected_model_name}..."):
+                # Determine max_len
+                # (Ensure max_len is defined in the script scope or use st.session_state if needed.
+                # In your code it's defined in the sidebar, so it should be accessible here)
+
+                # --- MODEL MAPPING ---
+                model_id = ""
+                ollama_map = {
+                    "Granite 4 (IBM)": "granite4:latest",
+                    "Llama 3.2 (Meta)": "llama3.2:latest",
+                    "Gemma 3 (Google)": "gemma3:latest"
+                }
+                for key, val in ollama_map.items():
+                    if key in selected_model_name:
+                        model_id = val
+                        break
+
+                if not model_id and "gpt" in selected_model_name.lower():
+                    response, usage = query_openai_model(messages_payload, max_len)
+                elif model_id:
+                    response, usage = query_local_model(messages_payload, max_len, model_id)
+                else:
+                    response, usage = "Error: Could not determine model to use.", None
+
+            st.markdown(response)
+
+        # 6. Save Assistant Response
+        st.session_state.messages.append({"role": "assistant", "content": response})
+
+        # 7. Metrics & Context Display
+        if usage:
+            if "GPT-4o" in selected_model_name:
+                m_name = "GPT-4o"
+            else:
+                m_name = selected_model_name.split(" ")[0]
+            tracker.log_usage(m_name, usage["input"], usage["output"])
+            update_sidebar_metrics()
+
+        if use_rag and retrieved_docs:
+            with st.expander("π View Context Used"):
+                for i, doc in enumerate(retrieved_docs):
+                    score = doc.metadata.get('relevance_score', 'N/A')
+                    src = os.path.basename(doc.metadata.get('source', 'Unknown'))
+                    st.caption(f"Rank {i+1} (Source: {src}, Rel: {score})")
+                    st.text(doc.page_content)
+                    st.divider()
+# --- TAB 3: PROMPT ARCHITECT ---
+with tab3:
+    st.header("π οΈ Mega-Prompt Factory")
+    st.info("Build standard templates for NIPRGPT.")
+
+    c1, c2 = st.columns([1,1])
+    with c1:
+        st.subheader("1. Parameters")
+        p = st.text_area("Persona", placeholder="Act as...", height=100)
+        c = st.text_area("Context", placeholder="Background...", height=100)
+        t = st.text_area("Task", placeholder="Action...", height=100)
+        v = st.text_input("Placeholder Name", value="PASTE_DATA_HERE")
+
+    with c2:
+        st.subheader("2. Result")
+        final = f"### ROLE\n{p}\n### CONTEXT\n{c}\n### TASK\n{t}\n### INPUT DATA\n\"\"\"\n[{v}]\n\"\"\""
+        st.code(final, language="markdown")
+        st.download_button("πΎ Download .txt", final, "template.txt")
+
+# --- TAB 4: KNOWLEDGE BASE ---
+with tab4:
+    st.header("π§ Personal Knowledge Base")
+    st.info(f"Managing knowledge for: **{st.session_state.username}**")
+
+    # We no longer check 'is_admin' for the whole tab
+    kb_tab1, kb_tab2 = st.tabs(["π€ Add Documents", "ποΈ Manage Database"])
+
+    # --- SUB-TAB 1: UPLOAD ---
+    with kb_tab1:
+        st.subheader("Ingest New Knowledge")
+        uploaded_file = st.file_uploader("Upload Instructions, Manuals, or Logs", type=["pdf", "docx", "txt", "md"])
+
+        col1, col2 = st.columns([1, 2])
+        with col1:
+            chunk_strategy = st.selectbox(
+                "Chunking Strategy",
+                ["paragraph", "token", "page"],
+                help="Paragraph: Manuals. Token: Dense text. Page: Forms."
+            )
+
+        if uploaded_file and st.button("Process & Add"):
+            with st.spinner("Analyzing and Indexing..."):
+                # 1. Save temp file
+                temp_path = rag_engine.save_uploaded_file(uploaded_file)
+
+                # 2. Process into USER'S specific DB (st.session_state.username)
+                success, msg = rag_engine.process_and_add_document(
+                    temp_path,
+                    st.session_state.username,
+                    chunk_strategy
+                )
+
+                if success:
+                    st.success(msg)
+                    st.rerun()
+                else:
+                    st.error(f"Failed: {msg}")
+
+        st.divider()
+        st.subheader("π Quick Test")
+        test_query = st.text_input("Ask your brain something...")
+        if test_query:
+            results = rag_engine.search_knowledge_base(test_query, st.session_state.username)
+            if not results:
+                st.warning("No matches found.")
+            for i, doc in enumerate(results):
+                src_name = os.path.basename(doc.metadata.get('source', '?'))
+                score = doc.metadata.get('relevance_score', 'N/A')
+                with st.expander(f"Match {i+1}: {src_name} (Score: {score})"):
+                    st.write(doc.page_content)
+
+    # --- SUB-TAB 2: MANAGE (Unlocked for Everyone) ---
+    with kb_tab2:
+        st.subheader("ποΈ Database Inventory")
+
+        docs = rag_engine.list_documents(st.session_state.username)
+
+        if not docs:
+            st.info("Your Knowledge Base is empty.")
+        else:
+            st.markdown(f"**Total Documents:** {len(docs)}")
+
+            for doc in docs:
+                c1, c2, c3, c4 = st.columns([3, 2, 1, 1])
+                with c1:
+                    st.text(f"π {doc['filename']}")
+                with c2:
+                    # FIX: Show strategy
+                    st.caption(f"βοΈ {doc.get('strategy', 'Unknown')}")
+                with c3:
+                    st.caption(f"{doc['chunks']}")
+                with c4:
+                    if st.button("ποΈ", key=doc['source'], help="Delete Document"):
+                        with st.spinner("Deleting..."):
+                            success, msg = rag_engine.delete_document(st.session_state.username, doc['source'])
+                            if success:
+                                st.success(msg)
+                                st.rerun()
+                            else:
+                                st.error(msg)
+
+        st.divider()
+        with st.expander("π¨ Danger Zone"):
+            # Allow ANY user to reset their OWN database
+            if st.button("β’οΈ RESET MY DATABASE", type="primary"):
+                success, msg = rag_engine.reset_knowledge_base(st.session_state.username)
+                if success:
+                    st.success(msg)
+                    st.rerun() ---
     if prompt := st.chat_input("Ask a question..."):
         # 1. Display User Message and save to history
         st.session_state.messages.append({"role": "user", "content": prompt})