NavyDevilDoc commited on
Commit
3755446
·
verified ·
1 Parent(s): faefa8f

Upload 4 files

Browse files
Files changed (4) hide show
  1. src/app.py +445 -0
  2. src/rag_engine.py +198 -0
  3. src/resources.py +20 -0
  4. src/tracker.py +262 -0
src/app.py ADDED
@@ -0,0 +1,445 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import requests
3
+ import os
4
+ import unicodedata
5
+ import resources # Assuming this file exists in your repo
6
+ import tracker
7
+ import rag_engine # Now safe to import at top level (lazy loading enabled)
8
+ from openai import OpenAI
9
+ from datetime import datetime
10
+
11
# --- CONFIGURATION ---
st.set_page_config(page_title="Navy AI Toolkit", page_icon="⚓", layout="wide")

# 1. SETUP CREDENTIALS
API_URL_ROOT = os.getenv("API_URL")  # For Ollama models
OPENAI_KEY = os.getenv("OPENAI_API_KEY")  # For GPT-4o

# --- INITIALIZATION ---
# Default to no roles so "admin" membership checks below are always safe.
if "roles" not in st.session_state:
    st.session_state.roles = []

# --- LOGIN / REGISTER LOGIC ---
# Gate the whole app behind authentication; anything below st.stop() only
# runs for a logged-in session.
if "authentication_status" not in st.session_state or st.session_state["authentication_status"] is None:
    # If not logged in, show tabs
    login_tab, register_tab = st.tabs(["🔑 Login", "📝 Register"])

    with login_tab:
        is_logged_in = tracker.check_login()
        # FIX: Trigger User DB Download ONLY on fresh login
        if is_logged_in:
            tracker.download_user_db(st.session_state.username)
            st.rerun()  # Refresh to show the app

    with register_tab:
        st.header("Create Account")
        with st.form("reg_form"):
            new_user = st.text_input("Username")
            new_name = st.text_input("Display Name")
            new_email = st.text_input("Email")
            new_pwd = st.text_input("Password", type="password")
            invite = st.text_input("Invitation Passcode")

            if st.form_submit_button("Register"):
                success, msg = tracker.register_user(new_email, new_user, new_name, new_pwd, invite)
                if success:
                    st.success(msg)
                else:
                    st.error(msg)

# Stop execution if not logged in
if not st.session_state.get("authentication_status"):
    st.stop()
53
+
54
# --- GLOBAL PLACEHOLDERS ---
# Filled while rendering the sidebar; update_sidebar_metrics() writes into
# them later without re-rendering the whole sidebar.
metric_placeholder = None
admin_metric_placeholder = None

# --- SIDEBAR (CONSOLIDATED) ---
with st.sidebar:
    st.header("👤 User Profile")
    st.write(f"Welcome, **{st.session_state.name}**")

    st.header("📊 Usage Tracker")
    metric_placeholder = st.empty()

    # Admin Tools
    if "admin" in st.session_state.roles:
        st.divider()
        st.header("🛡️ Admin Tools")
        admin_metric_placeholder = st.empty()

        # FIX: Point to the correct persistence path
        log_path = tracker.get_log_path()
        if log_path.exists():
            with open(log_path, "r") as f:
                log_data = f.read()
            st.download_button(
                label="📥 Download Usage Logs",
                data=log_data,
                file_name=f"usage_log_{datetime.now().strftime('%Y-%m-%d')}.json",
                mime="application/json"
            )
        else:
            st.warning("No logs found yet.")

    # Logout
    if "authenticator" in st.session_state:
        st.session_state.authenticator.logout(location='sidebar')

    st.divider()

    # --- MODEL SELECTOR ---
    st.header("🧠 Model Selector")

    # Display label -> Ollama model tag used by query_local_model().
    model_map = {
        "Granite 4 (IBM)": "granite4:latest",
        "Llama 3.2 (Meta)": "llama3.2:latest",
        "Gemma 3 (Google)": "gemma3:latest"
    }

    model_options = list(model_map.keys())
    model_captions = ["Slower for now, but free and private" for _ in model_options]

    # GPT-4o is admin-only because it sends data to OpenAI.
    if "admin" in st.session_state.roles:
        model_options.append("GPT-4o (Omni)")
        model_captions.append("Fast, smart, sends data to OpenAI")

    model_choice = st.radio(
        "Choose your Intelligence:",
        model_options,
        captions=model_captions
    )
    st.info(f"Connected to: **{model_choice}**")

    st.divider()
    st.header("⚙️ Controls")
    max_len = st.slider("Max Response Length (Tokens)", 100, 2000, 500)
118
+
119
# --- HELPER FUNCTIONS ---
def update_sidebar_metrics():
    """Refresh the token-usage metric placeholders rendered in the sidebar."""
    if metric_placeholder is None:
        # Sidebar has not been rendered yet; nothing to write into.
        return

    stats = tracker.get_daily_stats()
    mine = stats["users"].get(st.session_state.username, {"input": 0, "output": 0})
    metric_placeholder.metric("My Tokens Today", mine["input"] + mine["output"])

    is_admin = "admin" in st.session_state.roles
    if is_admin and admin_metric_placeholder is not None:
        admin_metric_placeholder.metric("Team Total Today", stats["total_tokens"])

# Render the metrics immediately on page load.
update_sidebar_metrics()
135
+
136
def query_local_model(user_prompt, system_persona, max_tokens, model_name):
    """Send a generation request to the self-hosted model gateway.

    Returns a (text, usage) pair; usage is None whenever the call fails,
    with the error description returned in the text slot instead.
    """
    if not API_URL_ROOT:
        return "Error: API_URL not set.", None

    payload = {
        "text": user_prompt,
        "persona": system_persona,
        "max_tokens": max_tokens,
        "model": model_name,
    }

    try:
        resp = requests.post(f"{API_URL_ROOT}/generate", json=payload, timeout=120)
        if resp.status_code == 200:
            body = resp.json()
            answer = body.get("response", "")
            usage = body.get("usage", {"input": 0, "output": 0})
            return answer, usage
        return f"Error {resp.status_code}: {resp.text}", None
    except Exception as e:
        # Network failures and malformed JSON both land here.
        return f"Connection Error: {e}", None
161
+
162
def query_gpt4o(prompt, persona, max_tokens):
    """Run a single-turn chat completion against OpenAI's GPT-4o.

    Returns (text, usage_dict); usage_dict is None on any failure, with the
    error message returned in the text slot.
    """
    if not OPENAI_KEY:
        return "Error: OPENAI_API_KEY not set.", None

    client = OpenAI(api_key=OPENAI_KEY)
    messages = [
        {"role": "system", "content": persona},
        {"role": "user", "content": prompt},
    ]

    try:
        completion = client.chat.completions.create(
            model="gpt-4o",
            max_tokens=max_tokens,
            messages=messages,
            temperature=0.3,
        )
        usage = {
            "input": completion.usage.prompt_tokens,
            "output": completion.usage.completion_tokens,
        }
        return completion.choices[0].message.content, usage
    except Exception as e:
        return f"OpenAI Error: {e}", None
184
+
185
# Translation table applied after NFKC normalization: folds typographic
# quotes/dashes to ASCII. Built once at import time so clean_text does all
# replacements in a single C-level pass (str.translate) instead of eight
# chained .replace() scans.
_ASCII_FOLD = str.maketrans({
    '“': '"', '”': '"', '‘': "'", '’': "'",
    '–': '-', '—': '-', '…': '...', '\u00a0': ' ',
})

def clean_text(text):
    """Normalize user text to a plain, ASCII-friendly form.

    Applies Unicode NFKC normalization, folds curly quotes, en/em dashes,
    ellipses and non-breaking spaces to ASCII equivalents, and strips
    surrounding whitespace. Falsy input (None, "") returns "".
    """
    if not text:
        return ""
    # NFKC already folds '…' and NBSP; the table handles curly quotes and
    # dashes, which NFKC leaves untouched.
    return unicodedata.normalize('NFKC', text).translate(_ASCII_FOLD).strip()
192
+
193
def ask_ai(user_prompt, system_persona, max_tokens):
    """Route a prompt to whichever model is selected in the sidebar."""
    if "GPT-4o" in model_choice:
        return query_gpt4o(user_prompt, system_persona, max_tokens)
    # Local models: translate the display label to its backend model tag.
    return query_local_model(
        user_prompt, system_persona, max_tokens, model_map[model_choice]
    )
199
+
200
# --- MAIN UI ---
st.title("AI Toolkit")
tab1, tab2, tab3, tab4 = st.tabs(["📧 Email Builder", "💬 Chat Playground", "🛠️ Prompt Architect", "📚 Knowledge Base"])

# --- TAB 1: EMAIL BUILDER ---
with tab1:
    st.header("Structured Email Generator")
    # Persist the last draft across reruns so it survives widget interaction.
    if "email_draft" not in st.session_state:
        st.session_state.email_draft = ""

    st.subheader("1. Define the Voice")
    style_mode = st.radio("How should the AI write?", ["Use a Preset Persona", "Mimic My Style"], horizontal=True)

    selected_persona_instruction = ""
    if style_mode == "Use a Preset Persona":
        persona_name = st.selectbox("Select a Persona", list(resources.TONE_LIBRARY.keys()))
        selected_persona_instruction = resources.TONE_LIBRARY[persona_name]
        st.info(f"**System Instruction:** {selected_persona_instruction}")
    else:
        # Few-shot style mimicry: concatenate the user's samples into the
        # system instruction.
        st.info("Upload 1-3 text files of your previous emails.")
        uploaded_style_files = st.file_uploader("Upload Samples (.txt)", type=["txt"], accept_multiple_files=True)
        if uploaded_style_files:
            style_context = ""
            for uploaded_file in uploaded_style_files:
                string_data = uploaded_file.read().decode("utf-8")
                style_context += f"---\n{string_data}\n---\n"
            selected_persona_instruction = f"Analyze these examples and mimic the style:\n{style_context}"

    st.divider()
    st.subheader("2. Details")
    c1, c2 = st.columns(2)
    with c1: recipient = st.text_input("Recipient")
    with c2: topic = st.text_input("Topic")

    st.caption("Content Source")
    input_method = st.toggle("Upload notes file?")
    raw_notes = ""
    if input_method:
        notes_file = st.file_uploader("Upload Notes (.txt)", type=["txt"])
        if notes_file: raw_notes = notes_file.read().decode("utf-8")
    else:
        raw_notes = st.text_area("Paste notes:", height=150)

    # Context Bar
    est_tokens = len(raw_notes) / 4  # rough heuristic: ~4 chars per token
    st.progress(min(est_tokens / 128000, 1.0), text=f"Context: {int(est_tokens)} tokens")

    if st.button("Draft Email", type="primary"):
        if not raw_notes:
            st.warning("Please provide notes.")
        else:
            clean_notes = clean_text(raw_notes)
            with st.spinner(f"Drafting with {model_choice}..."):
                prompt = f"TASK: Write email.\nTO: {recipient}\nTOPIC: {topic}\nSTYLE: {selected_persona_instruction}\nDATA: {clean_notes}"

                reply, usage = ask_ai(prompt, "You are an expert ghostwriter.", max_len)
                st.session_state.email_draft = reply

                if usage:
                    # NOTE(review): any non-Granite local model (Llama, Gemma)
                    # is logged as "GPT-4o" here — confirm this is intended.
                    m_name = "Granite" if "Granite" in model_choice else "GPT-4o"
                    tracker.log_usage(m_name, usage["input"], usage["output"])
                    update_sidebar_metrics()  # Force update

    if st.session_state.email_draft:
        st.subheader("Draft Result")
        st.text_area("Copy your email:", value=st.session_state.email_draft, height=300)
266
+
267
# --- TAB 2: CHAT PLAYGROUND ---
with tab2:
    st.header("Choose Your Model and Start a Discussion")

    # Persist the last answer across reruns.
    if "chat_response" not in st.session_state:
        st.session_state.chat_response = ""

    user_input = st.text_input("Ask a question:")

    c1, c2 = st.columns([1,1])
    with c1:
        use_rag = st.toggle("🔌 Enable Knowledge Base", value=True)
    with c2:
        est_tokens = len(user_input) / 4  # rough heuristic: ~4 chars per token
        st.progress(min(est_tokens / 2000, 1.0), text=f"Input: {int(est_tokens)} tokens")

    if st.button("Send Query"):
        if not user_input:
            st.warning("Please enter a question.")
        else:
            final_prompt = user_input
            system_persona = "You are a helpful assistant."

            # --- RAG LOGIC ---
            # When enabled, retrieved context is injected into the SYSTEM
            # persona; the user prompt itself stays unchanged.
            if use_rag:
                with st.spinner("🧠 Searching Knowledge Base..."):
                    # 1. Retrieve & Rerank (Now using the fixed function)
                    retrieved_docs = rag_engine.search_knowledge_base(
                        user_input,
                        st.session_state.username,
                        k=3
                    )

                    if retrieved_docs:
                        # 2. Format Context
                        context_text = ""
                        for i, doc in enumerate(retrieved_docs):
                            # Add metadata relevance score if available
                            score = doc.metadata.get('relevance_score', 'N/A')
                            src = os.path.basename(doc.metadata.get('source', 'Unknown'))
                            context_text += f"---\nSOURCE: {src} (Rel: {score})\nTEXT: {doc.page_content}\n"

                        # 3. Update Prompt
                        system_persona = (
                            "You are a Navy Document Analyst. "
                            "Answer the user's question strictly based on the Context provided below. "
                            "If the answer is not in the Context, state 'I cannot find that information in the provided documents.' \n\n"
                            f"### CONTEXT:\n{context_text}"
                        )
                        st.success(f"Found {len(retrieved_docs)} relevant documents.")
                        with st.expander("View Context Used"):
                            st.text(context_text)
                    else:
                        st.warning("No relevant documents found. Using general knowledge.")

            # --- GENERATION ---
            with st.spinner(f"Thinking with {model_choice}..."):
                reply, usage = ask_ai(final_prompt, system_persona, max_len)
                st.session_state.chat_response = reply

                if usage:
                    # NOTE(review): non-Granite local models are logged as
                    # "GPT-4o" — confirm this labeling is intended.
                    m_name = "Granite" if "Granite" in model_choice else "GPT-4o"
                    tracker.log_usage(m_name, usage["input"], usage["output"])
                    update_sidebar_metrics()

    if st.session_state.chat_response:
        st.divider()
        st.markdown("**AI Response:**")
        st.write(st.session_state.chat_response)
336
+
337
# --- TAB 3: PROMPT ARCHITECT ---
# Pure client-side template builder: no model calls are made here.
with tab3:
    st.header("🛠️ Mega-Prompt Factory")
    st.info("Build standard templates for NIPRGPT.")

    c1, c2 = st.columns([1,1])
    with c1:
        st.subheader("1. Parameters")
        p = st.text_area("Persona", placeholder="Act as...", height=100)
        c = st.text_area("Context", placeholder="Background...", height=100)
        t = st.text_area("Task", placeholder="Action...", height=100)
        v = st.text_input("Placeholder Name", value="PASTE_DATA_HERE")

    with c2:
        st.subheader("2. Result")
        # Assemble the template; [v] marks where users later paste their data.
        final = f"### ROLE\n{p}\n### CONTEXT\n{c}\n### TASK\n{t}\n### INPUT DATA\n\"\"\"\n[{v}]\n\"\"\""
        st.code(final, language="markdown")
        st.download_button("💾 Download .txt", final, "template.txt")
355
+
356
# --- TAB 4: KNOWLEDGE BASE ---
with tab4:
    st.header("🧠 Unit Knowledge Base")

    is_admin = "admin" in st.session_state.roles
    kb_tab1, kb_tab2 = st.tabs(["📤 Add Documents", "🗂️ Manage Database"])

    # --- SUB-TAB 1: UPLOAD ---
    with kb_tab1:
        if is_admin:
            st.subheader("Ingest New Knowledge")
            uploaded_file = st.file_uploader("Upload Instructions, Manuals, or Logs", type=["pdf", "docx", "txt", "md"])

            col1, col2 = st.columns([1, 2])
            with col1:
                chunk_strategy = st.selectbox(
                    "Chunking Strategy",
                    ["paragraph", "token", "page"],
                    help="Paragraph: Manuals. Token: Dense text. Page: Forms."
                )

            if uploaded_file and st.button("Process & Add"):
                with st.spinner("Analyzing and Indexing..."):
                    # Use safe save + process
                    temp_path = rag_engine.save_uploaded_file(uploaded_file)
                    success, msg = rag_engine.process_and_add_document(
                        temp_path,
                        st.session_state.username,
                        chunk_strategy
                    )

                    if success:
                        st.success(msg)
                        st.rerun()
                    else:
                        st.error(f"Failed: {msg}")
        else:
            st.info("🔒 Only Admins can upload documents.")

        st.divider()
        st.subheader("🔎 Quick Test")
        # Retrieval-only sanity check: runs the search, no generation.
        test_query = st.text_input("Ask the brain something...")
        if test_query:
            results = rag_engine.search_knowledge_base(test_query, st.session_state.username)
            for i, doc in enumerate(results):
                # Using cleaned safe basename
                src_name = os.path.basename(doc.metadata.get('source', '?'))
                score = doc.metadata.get('relevance_score', 'N/A')
                with st.expander(f"Match {i+1}: {src_name} (Score: {score})"):
                    st.write(doc.page_content)

    # --- SUB-TAB 2: MANAGE ---
    with kb_tab2:
        st.subheader("🗄️ Database Inventory")

        # 1. Fetch current docs
        docs = rag_engine.list_documents(st.session_state.username)

        if not docs:
            st.info("Knowledge Base is empty.")
        else:
            st.markdown(f"**Total Documents:** {len(docs)}")

            for doc in docs:
                c1, c2, c3 = st.columns([3, 1, 1])
                with c1:
                    st.text(f"📄 {doc['filename']}")
                with c2:
                    st.caption(f"{doc['chunks']} chunks")
                with c3:
                    if is_admin:
                        # The source path doubles as a unique widget key.
                        if st.button("🗑️ Delete", key=doc['source']):
                            with st.spinner("Deleting..."):
                                success, msg = rag_engine.delete_document(st.session_state.username, doc['source'])
                                if success:
                                    st.success(msg)
                                    st.rerun()
                                else:
                                    st.error(msg)
                    else:
                        st.caption("Read Only")

        if is_admin and docs:
            st.divider()
            with st.expander("🚨 Danger Zone"):
                if st.button("☢️ RESET ENTIRE DATABASE", type="primary"):
                    success, msg = rag_engine.reset_knowledge_base(st.session_state.username)
                    if success:
                        st.success(msg)
                        st.rerun()
src/rag_engine.py ADDED
@@ -0,0 +1,198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from langchain_chroma import Chroma
3
+ from langchain_huggingface import HuggingFaceEmbeddings
4
+ from sentence_transformers import CrossEncoder
5
+ from core.ChunkingManager import ChunkingManager, ChunkingStrategy
6
+ import tracker # To trigger syncs
7
+
8
# --- CONFIGURATION ---
UPLOAD_DIR = "/tmp/rag_uploads"  # scratch space for incoming uploads
# Per-user Chroma collections live under chroma_db/ next to this file.
DB_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "chroma_db")
EMBEDDING_MODEL_NAME = "all-MiniLM-L6-v2"
RERANKER_MODEL_NAME = "cross-encoder/ms-marco-MiniLM-L-6-v2"

# --- LAZY LOADING SINGLETONS ---
# We use these globals to store the models once loaded, so we don't reload them
# every time a function is called, but we also don't load them on import.
_embedding_fn = None
_reranker = None
_chunk_manager = None
20
+
21
def get_embedding_function():
    """Lazy-initialise and return the module-wide embedding model."""
    global _embedding_fn
    if _embedding_fn is not None:
        return _embedding_fn
    print("⚙️ Loading Embedding Model...")
    _embedding_fn = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL_NAME)
    return _embedding_fn
28
+
29
def get_reranker_model():
    """Lazy-initialise and return the shared CrossEncoder reranker."""
    global _reranker
    if _reranker is not None:
        return _reranker
    print("⚙️ Loading Reranker Model...")
    _reranker = CrossEncoder(RERANKER_MODEL_NAME)
    return _reranker
36
+
37
def get_chunk_manager():
    """Lazy-initialise and return the shared ChunkingManager."""
    global _chunk_manager
    if _chunk_manager is not None:
        return _chunk_manager
    print("⚙️ Loading Chunk Manager...")
    _chunk_manager = ChunkingManager(embedding_model_name=EMBEDDING_MODEL_NAME)
    return _chunk_manager
44
+
45
# --- DATABASE OPERATIONS ---
def get_vectorstore(username):
    """Return the persistent Chroma collection for a SPECIFIC USER.

    The username is reduced to its basename so a crafted value such as
    "../other" cannot escape DB_ROOT (path-traversal guard).
    """
    safe_username = os.path.basename(username)
    user_db_path = os.path.join(DB_ROOT, safe_username)

    # Create-if-missing in one call; the previous exists()-then-makedirs
    # pair was a needless TOCTOU race.
    os.makedirs(user_db_path, exist_ok=True)

    return Chroma(
        persist_directory=user_db_path,
        embedding_function=get_embedding_function(),
        collection_name=f"docs_{safe_username}"
    )
60
+
61
def save_uploaded_file(uploaded_file):
    """Write an uploaded file into UPLOAD_DIR and return its saved path.

    SECURITY: the client-supplied filename is reduced to its basename so
    names like "../../etc/passwd" cannot escape the upload directory.
    """
    # Create-if-missing in one call; the previous exists()-then-makedirs
    # pair was a needless TOCTOU race.
    os.makedirs(UPLOAD_DIR, exist_ok=True)

    safe_filename = os.path.basename(uploaded_file.name)
    file_path = os.path.join(UPLOAD_DIR, safe_filename)

    with open(file_path, "wb") as f:
        f.write(uploaded_file.getbuffer())
    return file_path
73
+
74
def process_and_add_document(file_path, username, strategy="paragraph"):
    """Chunk a saved file, index it into the user's vector store and sync.

    Args:
        file_path: local path of the already-saved upload; deleted on success.
        username: owner of the per-user Chroma collection.
        strategy: 'token' | 'paragraph' | 'page'; unknown values fall back
            to paragraph chunking.

    Returns:
        (ok, message) tuple suitable for direct display in the UI.
    """
    try:
        print(f"🧠 Chunking {os.path.basename(file_path)}...")

        strat_map = {
            "token": ChunkingStrategy.TOKEN,
            "paragraph": ChunkingStrategy.PARAGRAPH,
            "page": ChunkingStrategy.PAGE
        }
        selected_strategy = strat_map.get(strategy, ChunkingStrategy.PARAGRAPH)

        # Use the lazy-loaded chunk manager
        manager = get_chunk_manager()
        chunks = manager.process_document(
            file_path=file_path,
            strategy=selected_strategy,
            preprocess=True
        )

        if not chunks:
            return False, "No text extracted. Is the file empty/scanned?"

        print(f"💾 Indexing {len(chunks)} chunks into Vector DB...")
        db = get_vectorstore(username)
        db.add_documents(chunks)

        # Sync immediately so the hub copy never lags behind local state
        tracker.upload_user_db(username)

        # Remove the temp upload once it is safely indexed
        if os.path.exists(file_path):
            os.remove(file_path)

        return True, f"Successfully added {len(chunks)} chunks to Knowledge Base."

    except Exception as e:
        print(f"❌ Processing Error: {e}")
        return False, str(e)
111
+
112
# --- RETRIEVAL ENGINE ---
def search_knowledge_base(query, username, k=3):
    """
    Two-Stage Retrieval System (RAG):
    1. Retrieval: Get 10 candidates via fast Vector Search.
    2. Reranking: Sort them via Cross-Encoder (Slow/Precise).
    3. Return top k.

    Returns a list of at most k documents, each with a
    'relevance_score' added to its metadata; [] when nothing matches.
    """
    db = get_vectorstore(username)
    reranker = get_reranker_model()

    # 1. Broad Search (Retrieve more than needed to filter later)
    results = db.similarity_search(query, k=10)

    if not results:
        return []

    # 2. Reranking
    # Prepare pairs: [[Query, Text1], [Query, Text2]...]
    passages = [doc.page_content for doc in results]
    ranks = reranker.rank(query, passages)

    # 3. Sort and Filter
    # Reranker returns list of dicts: {'corpus_id': 0, 'score': 0.9}
    top_results = []

    # Sort ranks by score descending just to be safe (though .rank() usually sorts)
    sorted_ranks = sorted(ranks, key=lambda x: x['score'], reverse=True)

    for rank in sorted_ranks[:k]:
        # corpus_id indexes back into the original similarity_search results
        doc_index = rank['corpus_id']
        doc = results[doc_index]
        # Append score for transparency (mutates doc.metadata in place)
        doc.metadata["relevance_score"] = round(rank['score'], 4)
        top_results.append(doc)

    return top_results
149
+
150
def list_documents(username):
    """Summarise the unique source files held in the user's vector store.

    Returns a list of {'source', 'filename', 'chunks'} dicts, one per file.
    WARNING: This pulls all metadata. Performance degrades >10k chunks.
    """
    try:
        db = get_vectorstore(username)
        metadatas = db.get()['metadatas']

        stats = {}
        for meta in metadatas:
            src = meta.get('source', 'unknown')
            entry = stats.setdefault(
                src,
                {'source': src, 'filename': os.path.basename(src), 'chunks': 0},
            )
            entry['chunks'] += 1

        return list(stats.values())

    except Exception as e:
        print(f"❌ Error listing docs: {e}")
        return []
175
+
176
def delete_document(username, source_path):
    """Remove every chunk whose metadata 'source' matches source_path.

    Returns an (ok, message) tuple for the UI.
    """
    try:
        print(f"🗑️ Deleting {source_path} for {username}...")
        get_vectorstore(username).delete(where={"source": source_path})
        # Push the shrunken DB to the hub right away.
        tracker.upload_user_db(username)
    except Exception as e:
        return False, str(e)
    return True, f"Deleted {os.path.basename(source_path)}"
189
+
190
def reset_knowledge_base(username):
    """Nuke option: drop the user's entire collection and sync upstream."""
    try:
        get_vectorstore(username).delete_collection()
        tracker.upload_user_db(username)
    except Exception as e:
        return False, str(e)
    return True, "Knowledge Base completely reset."
src/resources.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# A dictionary of pre-written system instructions
# This allows you to tweak the "Vibe" without breaking the code.
# Keys are the labels shown in the app's persona selector; values are the
# system-prompt text injected verbatim into the model request.
TONE_LIBRARY = {
    "Standard Professional": (
        "Use a standard US Navy professional tone. "
        "Be concise, respectful, and use BLUF (Bottom Line Up Front) format."
    ),
    "The Hammer (Direct/Urgent)": (
        "Be extremely direct and authoritative. Minimize pleasantries. "
        "Focus strictly on facts, deadlines, and requirements. Use short sentences."
    ),
    "The Diplomat (Apologetic/Soft)": (
        "Use a softer, diplomatic tone. Focus on preserving the relationship. "
        "Acknowledge the inconvenience and offer solutions. Use 'We' instead of 'I'."
    ),
    "The Mentor (Encouraging)": (
        "Use a coaching tone. Be corrective but encouraging. "
        "Explain the 'Why' behind the instructions."
    )
}
src/tracker.py ADDED
@@ -0,0 +1,262 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import streamlit_authenticator as stauth
3
+ import yaml
4
+ from yaml.loader import SafeLoader
5
+ import json
6
+ import os
7
+ import uuid
8
+ from datetime import datetime
9
+ import pytz
10
+ from huggingface_hub import HfApi, hf_hub_download, snapshot_download, CommitScheduler
11
+ from pathlib import Path
12
+ import bcrypt
13
+
14
# --- CONFIGURATION ---
DATASET_REPO_ID = "NavyDevilDoc/navy-ai-logs"  # HF dataset used as storage backend
LOG_FILE = "usage_log.json"
CONFIG_FILE = "config.yaml"
CHROMA_ROOT = "chroma_db"
HF_TOKEN = os.getenv("HF_TOKEN")
# NOTE(review): if INVITE_CODE is unset, the fallback value effectively
# becomes a working passcode — confirm the env var is set in deployment.
INVITE_CODE = os.getenv("INVITE_CODE", "CHANGE_ME_IN_SETTINGS") # Security Fix
TIMEZONE = pytz.timezone("US/Eastern")

# --- DATA PERSISTENCE SETUP (The Fix) ---
# Create a local directory that the Scheduler will watch
LOCAL_DATA_DIR = Path("data_persistence")
LOCAL_DATA_DIR.mkdir(exist_ok=True)

# Initialize the Scheduler
# This runs in the background and pushes changes every 1 minute
scheduler = CommitScheduler(
    repo_id=DATASET_REPO_ID,
    repo_type="dataset",
    folder_path=LOCAL_DATA_DIR,
    path_in_repo=".", # Sync to root of dataset
    every=1, # Sync every 1 minute
    token=HF_TOKEN
)
38
+
39
# --- PATH HELPERS ---
def get_config_path():
    """Returns the path to the LOCAL config file in the persistence folder."""
    return LOCAL_DATA_DIR / CONFIG_FILE

def get_log_path():
    """Returns the path to the LOCAL log file in the persistence folder."""
    return LOCAL_DATA_DIR / LOG_FILE
47
+
48
# --- GENERIC FILE SYNC (Cached!) ---
@st.cache_data(ttl=60) # Only check cloud every 60 seconds
def download_config_if_needed():
    """Downloads config from HF only if cache is stale.

    No-op without HF_TOKEN. Failures are logged rather than raised so the
    app keeps serving with the last local copy of the config.
    """
    if not HF_TOKEN: return
    try:
        hf_hub_download(
            repo_id=DATASET_REPO_ID,
            filename=CONFIG_FILE,
            repo_type="dataset",
            local_dir=LOCAL_DATA_DIR, # Download directly to watched folder
            token=HF_TOKEN,
            force_download=True
        )
        print("✅ Config refreshed from cloud.")
    except Exception as e:
        print(f"⚠️ Cloud pull failed for config: {e}")
65
+
66
# We don't cache logs because we need to write to them frequently
def ensure_log_exists():
    """Make sure the local usage log exists, pulling from the hub if possible.

    Tries to download LOG_FILE from the dataset repo; if that fails for any
    reason (file absent on the cloud, no network, bad token) an empty JSON
    object is written locally so later reads/writes succeed.
    """
    log_path = LOCAL_DATA_DIR / LOG_FILE
    if log_path.exists():
        return
    try:
        hf_hub_download(
            repo_id=DATASET_REPO_ID,
            filename=LOG_FILE,
            repo_type="dataset",
            local_dir=LOCAL_DATA_DIR,
            token=HF_TOKEN
        )
    except Exception:
        # Was a bare `except:` — narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        # Create empty log if it doesn't exist on cloud yet
        with open(log_path, "w") as f:
            json.dump({}, f)
81
+
82
# --- USER DB SYNC (For ChromaDB) ---
def download_user_db(username):
    """Restores ONLY the specific user's Knowledge Base."""
    if not HF_TOKEN: return

    target_dir = os.path.dirname(os.path.abspath(__file__))
    # NOTE(review): username is not basename-sanitized here, unlike
    # rag_engine.get_vectorstore — confirm it is validated upstream.
    user_db_path = f"{CHROMA_ROOT}/{username}"

    try:
        # We don't use the scheduler for ChromaDB yet (too large)
        # We stick to snapshot_download for now
        print(f"📥 Syncing Knowledge Base for {username}...")
        snapshot_download(
            repo_id=DATASET_REPO_ID,
            repo_type="dataset",
            allow_patterns=[f"{user_db_path}/*"],
            local_dir=target_dir,
            token=HF_TOKEN
        )
        print("✅ User Knowledge Base Restored.")
    except Exception as e:
        # New users have nothing to restore; treat failure as best-effort.
        print(f"⚠️ New user or sync error: {e}")
104
+
105
def upload_user_db(username):
    """Backs up ONLY the specific user's Knowledge Base."""
    if not HF_TOKEN: return

    target_dir = os.path.dirname(os.path.abspath(__file__))
    user_db_rel_path = os.path.join(CHROMA_ROOT, username)
    user_db_abs_path = os.path.join(target_dir, user_db_rel_path)

    # Nothing to back up yet (e.g. brand-new user with no documents).
    if not os.path.exists(user_db_abs_path):
        return

    try:
        api = HfApi(token=HF_TOKEN)
        api.upload_folder(
            folder_path=user_db_abs_path,
            path_in_repo=user_db_rel_path,
            repo_id=DATASET_REPO_ID,
            repo_type="dataset",
            commit_message=f"KB Update ({username}): {datetime.now(TIMEZONE)}"
        )
        print(f"✅ Knowledge Base Saved for {username}.")
    except Exception as e:
        # Best-effort: a failed backup must not crash the request.
        print(f"⚠️ DB sync failed: {e}")
128
+
129
# --- AUTHENTICATION ---
def check_login():
    """Render the login widget and populate session state on success.

    Returns True only for an authenticated session; False (after showing a
    user-facing message) otherwise. On success it sets roles, username,
    name and the authenticator object in st.session_state.
    """
    # 1. Cached Download
    download_config_if_needed()

    try:
        config_path = get_config_path()
        if not config_path.exists():
            st.error(f"🚨 CRITICAL: Config not found at {config_path}")
            return False

        with open(config_path) as file:
            config = yaml.load(file, Loader=SafeLoader)
    except Exception as e:
        st.error(f"🚨 Config Error: {e}")
        return False

    authenticator = stauth.Authenticate(
        config['credentials'],
        config['cookie']['name'],
        config['cookie']['key'],
        config['cookie']['expiry_days']
    )

    # Renders the login form and writes authentication_status/username/name
    # into st.session_state.
    authenticator.login(location='main')

    if st.session_state["authentication_status"]:
        username = st.session_state["username"]
        try:
            user_data = config['credentials']['usernames'].get(username, {})
            user_roles = user_data.get('roles', [])
        except Exception as e:
            # Malformed config entry: fall back to no roles rather than crash.
            user_roles = []

        st.session_state.roles = user_roles
        st.session_state.username = username
        st.session_state.name = st.session_state.get("name")
        st.session_state.authenticator = authenticator
        return True

    elif st.session_state["authentication_status"] is False:
        st.error('Username/password is incorrect')
        return False
    elif st.session_state["authentication_status"] is None:
        st.warning('Please enter your username and password')
        return False
175
+
176
# --- REGISTRATION ---
def register_user(new_email, new_username, new_name, new_password, invite_code):
    """Create a new account in config.yaml (synced to the hub by the scheduler).

    Args:
        new_email / new_username / new_name: account identity fields.
        new_password: plaintext password; stored only as a bcrypt hash.
        invite_code: must match the INVITE_CODE environment secret.

    Returns:
        (ok, message) tuple for direct display in the UI.
    """
    import hmac  # local import: only needed for the constant-time compare

    # Constant-time comparison so the invite code cannot be probed
    # character-by-character via response timing.
    if not hmac.compare_digest(str(invite_code), str(INVITE_CODE)):
        return False, "Invalid Invite Code."

    # Ensure we have the latest config before writing
    download_config_if_needed()
    config_path = get_config_path()

    # Lock the file for reading/writing
    # (The Scheduler handles the cloud sync, but we need to handle local consistency)
    with scheduler.lock:
        with open(config_path) as file:
            config = yaml.load(file, Loader=SafeLoader)

        if new_username in config['credentials']['usernames']:
            return False, "Username already exists."

        # Never persist the plaintext password — bcrypt hash only.
        hashed_bytes = bcrypt.hashpw(new_password.encode('utf-8'), bcrypt.gensalt())
        hashed_pwd = hashed_bytes.decode('utf-8')

        new_user_entry = {
            "email": new_email,
            "name": new_name,
            "password": hashed_pwd,
            "roles": ["user"]
        }

        config['credentials']['usernames'][new_username] = new_user_entry

        with open(config_path, 'w') as file:
            yaml.dump(config, file, default_flow_style=False)

    return True, "Account created! Please log in."
210
+
211
# --- LOGGING ---
def log_usage(model_name, input_tokens, output_tokens):
    """Record one model call in today's usage totals in the local JSON log.

    Args:
        model_name: label under which the call is attributed (currently unused
            in the stored record itself).
        input_tokens / output_tokens: token counts reported by the backend.

    The CommitScheduler lock makes the read-modify-write atomic locally;
    the scheduler then pushes the changed file to the hub automatically.
    """
    ensure_log_exists()
    log_path = get_log_path()

    username = st.session_state.get("username", "anonymous")
    today = datetime.now(TIMEZONE).strftime("%Y-%m-%d")

    # Scheduler Lock guarantees atomic writes locally
    with scheduler.lock:
        data = {}
        if log_path.exists():
            with open(log_path, "r") as f:
                try:
                    data = json.load(f)
                except ValueError:
                    # Was a bare `except:`; ValueError covers JSONDecodeError.
                    # Corrupt/empty log: start fresh rather than crash.
                    data = {}

        day = data.setdefault(today, {"total_tokens": 0, "users": {}})
        user = day["users"].setdefault(username, {"input": 0, "output": 0, "calls": 0})

        day["total_tokens"] += (input_tokens + output_tokens)
        user["input"] += input_tokens
        user["output"] += output_tokens
        user["calls"] += 1

        with open(log_path, "w") as f:
            json.dump(data, f, indent=2)

    # No need to call upload_file() manually!
    # The Scheduler detects the file change and uploads it automatically.
246
+
247
def get_daily_stats():
    """Return today's usage record, or an empty skeleton when none exists.

    Shape: {"total_tokens": int, "users": {username: {...}}}.
    """
    ensure_log_exists()
    log_path = get_log_path()

    today = datetime.now(TIMEZONE).strftime("%Y-%m-%d")

    if log_path.exists():
        try:
            with open(log_path, "r") as f:
                data = json.load(f)
        except (ValueError, OSError):
            # Was a bare `except: pass`; narrowed to the failures we expect
            # (corrupt JSON, unreadable file) so real bugs surface.
            data = {}
        if today in data:
            return data[today]
    return {"total_tokens": 0, "users": {}}