NavyDevilDoc commited on
Commit
19f1b5a
·
verified ·
1 Parent(s): 10e6f84

Update src/app.py

Browse files
Files changed (1) hide show
  1. src/app.py +138 -291
src/app.py CHANGED
@@ -9,6 +9,9 @@ import zipfile
9
  import tracker
10
  import rag_engine
11
  import doc_loader
 
 
 
12
  from openai import OpenAI
13
  from datetime import datetime
14
  from test_integration import run_tests
@@ -27,105 +30,77 @@ if "roles" not in st.session_state:
27
 
28
  if "quiz_state" not in st.session_state:
29
  st.session_state.quiz_state = {
30
- "active": False, # Is a question currently displayed?
31
- "question_data": None, # The current acronym/doc object
32
- "user_answer": "", # What the user typed
33
- "feedback": None, # The LLM's grading response
34
- "streak": 0, # Fun gamification metric
35
- "generated_question_text": ""
36
  }
37
 
38
- # NEW: Quiz History for Study Guide
39
- if "quiz_history" not in st.session_state:
40
- st.session_state.quiz_history = []
41
 
42
- if "active_index" not in st.session_state:
43
- st.session_state.active_index = None
 
44
 
45
- # --- FLATTENER LOGIC (Integrated) ---
46
  class OutlineProcessor:
47
  """Parses text outlines for the Flattener tool."""
48
- def __init__(self, file_content):
49
- self.raw_lines = file_content.split('\n')
50
-
51
- def _is_list_item(self, line):
52
- pattern = r"^\s*(\d+\.|[a-zA-Z]\.|-|\*)\s+"
53
- return bool(re.match(pattern, line))
54
-
55
  def _merge_multiline_items(self):
56
  merged_lines = []
57
  for line in self.raw_lines:
58
  stripped = line.strip()
59
  if not stripped: continue
60
- if not merged_lines:
61
- merged_lines.append(line)
62
- continue
63
- if not self._is_list_item(line):
64
- merged_lines[-1] = merged_lines[-1].rstrip() + " " + stripped
65
- else:
66
- merged_lines.append(line)
67
  return merged_lines
68
-
69
  def parse(self):
70
  clean_lines = self._merge_multiline_items()
71
- stack = []
72
- results = []
73
  for line in clean_lines:
74
  stripped = line.strip()
75
  indent = len(line) - len(line.lstrip())
76
- while stack and stack[-1]['indent'] >= indent:
77
- stack.pop()
78
  stack.append({'indent': indent, 'text': stripped})
79
- if len(stack) > 1:
80
- context_str = " > ".join([item['text'] for item in stack[:-1]])
81
- else:
82
- context_str = "ROOT"
83
  results.append({"context": context_str, "target": stripped})
84
  return results
85
 
86
- # --- HELPER FUNCTIONS ---
87
  def query_model_universal(messages, max_tokens, model_choice, user_key=None):
88
  """Unified router for both Chat and Tools."""
 
 
 
 
 
89
  if "GPT-4o" in model_choice:
90
  key = user_key if user_key else OPENAI_KEY
91
  if not key: return "[Error: No OpenAI API Key]", None
92
-
93
  client = OpenAI(api_key=key)
94
  try:
95
- resp = client.chat.completions.create(
96
- model="gpt-4o", max_tokens=max_tokens, messages=messages, temperature=0.3
97
- )
98
  usage = {"input": resp.usage.prompt_tokens, "output": resp.usage.completion_tokens}
99
  return resp.choices[0].message.content, usage
100
- except Exception as e:
101
- return f"[OpenAI Error: {e}]", None
102
  else:
103
- model_map = {
104
- "Granite 4 (IBM)": "granite4:latest",
105
- "Llama 3.2 (Meta)": "llama3.2:latest",
106
- "Gemma 3 (Google)": "gemma3:latest"
107
- }
108
  tech_name = model_map.get(model_choice)
109
  if not tech_name: return "[Error: Model Map Failed]", None
110
-
111
  url = f"{API_URL_ROOT}/generate"
112
-
113
- hist = ""
114
- sys_msg = "You are a helpful assistant."
115
  for m in messages:
116
  if m['role']=='system': sys_msg = m['content']
117
  elif m['role']=='user': hist += f"User: {m['content']}\n"
118
  elif m['role']=='assistant': hist += f"Assistant: {m['content']}\n"
119
  hist += "Assistant: "
120
-
121
  try:
122
  r = requests.post(url, json={"text": hist, "persona": sys_msg, "max_tokens": max_tokens, "model": tech_name}, timeout=600)
123
  if r.status_code == 200:
124
  d = r.json()
125
  return d.get("response", ""), d.get("usage", {"input":0,"output":0})
126
  return f"[Local Error {r.status_code}]", None
127
- except Exception as e:
128
- return f"[Conn Error: {e}]", None
129
 
130
  def update_sidebar_metrics():
131
  if metric_placeholder:
@@ -134,15 +109,9 @@ def update_sidebar_metrics():
134
  metric_placeholder.metric("My Tokens Today", u_stats["input"] + u_stats["output"])
135
 
136
  def generate_study_guide_md(history):
137
- """Converts quiz history to a Markdown string."""
138
- md = "# ⚓ Study Guide\n\n"
139
- md += f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M')}\n\n"
140
  for item in history:
141
- md += f"## Q: {item['question']}\n"
142
- md += f"**Your Answer:** {item['user_answer']}\n\n"
143
- md += f"**Grade:** {item['grade']}\n\n"
144
- md += f"**Context/Correct Info:**\n> {item['context']}\n\n"
145
- md += "---\n\n"
146
  return md
147
 
148
  # --- LOGIN ---
@@ -159,10 +128,8 @@ if "authentication_status" not in st.session_state or st.session_state["authenti
159
  with register_tab:
160
  st.header("Create Account")
161
  with st.form("reg_form"):
162
- new_user = st.text_input("Username")
163
- new_name = st.text_input("Display Name")
164
- new_email = st.text_input("Email")
165
- new_pwd = st.text_input("Password", type="password")
166
  invite = st.text_input("Invitation Passcode")
167
  if st.form_submit_button("Register"):
168
  success, msg = tracker.register_user(new_email, new_user, new_name, new_pwd, invite)
@@ -177,15 +144,12 @@ with st.sidebar:
177
  st.write(f"Welcome, **{st.session_state.name}**")
178
  st.header("📊 Usage Tracker")
179
  metric_placeholder = st.empty()
 
 
180
  if "admin" in st.session_state.roles:
181
- st.divider()
182
- st.header("🛡️ Admin Tools")
183
- log_path = tracker.get_log_path()
184
- if log_path.exists():
185
- with open(log_path, "r") as f: log_data = f.read()
186
- st.download_button("📥 Download Usage Logs", log_data, f"usage_log_{datetime.now().strftime('%Y-%m-%d')}.json", "application/json")
187
  st.divider()
188
-
189
  st.header("🌲 Pinecone Settings")
190
  pc_key = os.getenv("PINECONE_API_KEY")
191
  if pc_key:
@@ -193,39 +157,27 @@ with st.sidebar:
193
  indexes = pm.list_indexes()
194
  selected_index = st.selectbox("Active Index", indexes)
195
  st.session_state.active_index = selected_index
196
-
197
- # 2. SAFETY CHECK VISUAL (FIXED)
198
  if selected_index:
199
- # Check if the user has already selected a model; default to MiniLM if not
200
  current_model = st.session_state.get("active_embed_model", "sentence-transformers/all-MiniLM-L6-v2")
201
  try:
202
  emb_fn = rag_engine.get_embedding_func(current_model)
203
  test_vec = emb_fn.embed_query("test")
204
  active_model_dim = len(test_vec)
205
- is_compatible = pm.check_dimension_compatibility(selected_index, active_model_dim)
206
- if is_compatible:
207
- st.caption(f" Compatible with Model ({active_model_dim}d)")
208
- else:
209
- st.error(f"❌ Mismatch! Model is {active_model_dim}d, Index is not.")
210
- except Exception as e:
211
- st.caption(f"⚠️ Could not verify dims: {e}")
212
-
213
  with st.expander("Create New Index"):
214
  new_idx_name = st.text_input("Index Name")
215
- new_idx_dim = st.selectbox("Vector Dimension", [384, 768, 1024, 1536, 3072], index=0)
216
  if st.button("Create"):
217
- with st.spinner("Provisioning Cloud Index..."):
218
  ok, msg = pm.create_index(new_idx_name, dimension=new_idx_dim)
219
- if ok:
220
- st.success(msg)
221
- time.sleep(2)
222
- st.rerun()
223
  else: st.error(msg)
224
- else:
225
- st.warning("No Pinecone Key Found")
226
 
227
  st.header("🧠 Intelligence")
228
- st.subheader("1. Embeddings (The Memory)")
229
  embed_options = {
230
  "Standard (All-MiniLM, 384d)": "sentence-transformers/all-MiniLM-L6-v2",
231
  "High-Perf (MPNet, 768d)": "sentence-transformers/all-mpnet-base-v2",
@@ -234,27 +186,17 @@ with st.sidebar:
234
  embed_choice_label = st.selectbox("Select Embedding Model", list(embed_options.keys()))
235
  st.session_state.active_embed_model = embed_options[embed_choice_label]
236
 
237
- st.subheader("2. Chat Model (The Brain)")
238
- model_map = {
239
- "Granite 4 (IBM)": "granite4:latest",
240
- "Llama 3.2 (Meta)": "llama3.2:latest",
241
- "Gemma 3 (Google)": "gemma3:latest"
242
- }
243
  opts = list(model_map.keys())
244
- model_captions = ["Slower, free, private" for _ in opts]
245
  is_admin = "admin" in st.session_state.roles
246
  user_key = None
247
  if not is_admin:
248
- user_key = st.text_input("🔓 Unlock GPT-4o (Enter API Key)", type="password", key=f"key_{st.session_state.username}")
249
- if user_key:
250
- st.session_state.user_openai_key = user_key
251
- st.caption("✅ Key Active")
252
- else: st.session_state.user_openai_key = None
253
  else: st.session_state.user_openai_key = None
254
- if is_admin or st.session_state.get("user_openai_key"):
255
- opts.append("GPT-4o (Omni)")
256
- model_captions.append("Fast, smart, sends data to OpenAI")
257
- model_choice = st.radio("Select Model:", opts, captions=model_captions, key="model_selector_radio")
258
  st.info(f"Connected to: **{model_choice}**")
259
  st.divider()
260
  if st.session_state.authenticator: st.session_state.authenticator.logout(location='sidebar')
@@ -269,6 +211,10 @@ tab1, tab2, tab3 = st.tabs(["💬 Chat Playground", "📂 Knowledge & Tools", "
269
  with tab1:
270
  st.header("Discussion & Analysis")
271
  if "messages" not in st.session_state: st.session_state.messages = []
 
 
 
 
272
  c1, c2 = st.columns([3, 1])
273
  with c1: st.caption(f"Active Model: **{st.session_state.get('model_selector_radio', 'Granite')}**")
274
  with c2: use_rag = st.toggle("Enable Knowledge Base", value=False)
@@ -279,6 +225,8 @@ with tab1:
279
  with st.chat_message("user"): st.markdown(prompt)
280
  context_txt = ""
281
  sys_p = "You are a helpful AI assistant."
 
 
282
  if use_rag:
283
  if not st.session_state.active_index: st.error("⚠️ Please select an Active Index in the sidebar first.")
284
  else:
@@ -290,19 +238,16 @@ with tab1:
290
  embed_model_name=st.session_state.active_embed_model
291
  )
292
  if docs:
293
- sys_p = (
294
- "You are a Navy Document Analyst. "
295
- "You must answer the user's question based PRIMARILY on the provided Context. "
296
- "If the Context contains the answer, output it clearly. "
297
- "If the Context does NOT contain the answer, simply state: "
298
- "'I cannot find that specific information in the documents provided.'"
299
- )
300
  for i, d in enumerate(docs):
301
  src = d.metadata.get('source', 'Unknown')
302
  context_txt += f"<document index='{i+1}' source='{src}'>\n{d.page_content}\n</document>\n"
 
 
303
  if context_txt:
304
  final_prompt = f"User Question: {prompt}\n\n<context>\n{context_txt}\n</context>\n\nInstruction: Answer using the context above."
305
  else: final_prompt = prompt
 
306
  with st.chat_message("assistant"):
307
  with st.spinner("Thinking..."):
308
  hist = [{"role":"system", "content":sys_p}] + st.session_state.messages[-6:-1] + [{"role":"user", "content":final_prompt}]
@@ -320,49 +265,40 @@ with tab1:
320
  with tab2:
321
  st.header("Document Processor")
322
  c1, c2 = st.columns([1, 1])
323
- with c1: uploaded_file = st.file_uploader("Upload File (PDF, PPT, Doc, Text)", type=["pdf", "docx", "pptx", "txt", "md"])
324
  with c2:
325
- use_vision = st.toggle("👁️ Enable Vision Mode", help="Use GPT-4o to read diagrams/tables.")
326
- if use_vision and "GPT-4o" not in opts: st.warning("Vision requires OpenAI Access.")
327
-
328
  if uploaded_file:
329
  temp_path = rag_engine.save_uploaded_file(uploaded_file, st.session_state.username)
330
  col_a, col_b, col_c = st.columns(3)
331
  with col_a:
332
- chunk_strategy = st.selectbox("Chunking Strategy", ["paragraph", "token"], key="chunk_selector")
333
- if st.button("📥 Add to Knowledge Base", type="primary"):
334
- if not st.session_state.active_index: st.error("Please select an Active Index.")
335
  else:
336
  with st.spinner("Ingesting..."):
337
- ok, msg = rag_engine.ingest_file(
338
- file_path=temp_path,
339
- username=st.session_state.username,
340
- index_name=st.session_state.active_index,
341
- strategy=chunk_strategy,
342
- embed_model_name=st.session_state.active_embed_model
343
- )
344
- if ok:
345
- tracker.upload_user_db(st.session_state.username)
346
- st.success(msg)
347
  else: st.error(msg)
348
  with col_b:
349
  st.write(""); st.write("")
350
- if st.button("📝 Summarize Document"):
351
- with st.spinner("Reading..."):
352
  key = st.session_state.get("user_openai_key") or OPENAI_KEY
353
  class FileObj:
354
  def __init__(self, p, n): self.path=p; self.name=n
355
  def read(self):
356
  with open(self.path, "rb") as f: return f.read()
357
  raw = doc_loader.extract_text_from_file(FileObj(temp_path, uploaded_file.name), use_vision=use_vision, api_key=key)
358
- prompt = f"Summarize this document:\n\n{raw[:20000]}"
359
  msgs = [{"role":"user", "content": prompt}]
360
  summ, usage = query_model_universal(msgs, 1000, model_choice, st.session_state.get("user_openai_key"))
361
- st.subheader("Summary Result"); st.markdown(summ)
362
  with col_c:
363
  st.write(""); st.write("")
364
  if "flattened_result" not in st.session_state: st.session_state.flattened_result = None
365
- if st.button("📄 Flatten Context"):
366
  with st.spinner("Flattening..."):
367
  key = st.session_state.get("user_openai_key") or OPENAI_KEY
368
  with open(temp_path, "rb") as f:
@@ -370,232 +306,147 @@ with tab2:
370
  def __init__(self, data, n): self.data=data; self.name=n
371
  def read(self): return self.data
372
  raw = doc_loader.extract_text_from_file(Wrapper(f.read(), uploaded_file.name), use_vision=use_vision, api_key=key)
373
- proc = OutlineProcessor(raw)
374
- items = proc.parse()
375
- out_txt = []
376
- bar = st.progress(0)
377
  for i, item in enumerate(items):
378
  p = f"Context: {item['context']}\nTarget: {item['target']}\nRewrite as one sentence."
379
  m = [{"role":"user", "content": p}]
380
  res, _ = query_model_universal(m, 300, model_choice, st.session_state.get("user_openai_key"))
381
- out_txt.append(res)
382
- bar.progress((i+1)/len(items))
383
  final_flattened_text = "\n".join(out_txt)
384
  st.session_state.flattened_result = {"text": final_flattened_text, "source": f"{uploaded_file.name}_flat"}
385
  st.rerun()
386
  if st.session_state.flattened_result:
387
  res = st.session_state.flattened_result
388
- st.success("Flattening Complete!")
389
- st.text_area("Result", res["text"], height=200)
390
- if st.button("📥 Index This Flattened Version"):
391
- if not st.session_state.active_index: st.error("Please select an Active Index.")
392
  else:
393
  with st.spinner("Indexing..."):
394
- ok, msg = rag_engine.process_and_add_text(
395
- text=res["text"],
396
- source_name=res["source"],
397
- username=st.session_state.username,
398
- index_name=st.session_state.active_index
399
- )
400
- if ok:
401
- tracker.upload_user_db(st.session_state.username)
402
- st.success(msg)
403
  else: st.error(msg)
404
  st.divider()
405
  st.subheader("Database Management")
406
- col_db_1, col_db_2 = st.columns([2, 1])
407
- with col_db_1: st.info("If Quiz Mode is failing, your local files might be missing.")
408
- with col_db_2:
409
  if st.button("🔄 Resync from Pinecone"):
410
- if not st.session_state.active_index: st.error("Select Index first.")
411
  else:
412
- with st.spinner("Downloading memories..."):
413
  ok, msg = rag_engine.rebuild_cache_from_pinecone(st.session_state.username, st.session_state.active_index)
414
  if ok: st.success(msg); time.sleep(1); st.rerun()
415
  else: st.error(msg)
416
- st.divider()
417
  docs = rag_engine.list_documents(st.session_state.username)
418
  if docs:
419
  for d in docs:
420
  c1, c2 = st.columns([4,1])
421
- c1.text(f"📄 {d['filename']} (Cached)")
422
  if c2.button("🗑️", key=d['source']):
423
- if not st.session_state.active_index: st.error("Select Index first.")
424
  else:
425
  rag_engine.delete_document(st.session_state.username, d['source'], st.session_state.active_index)
426
- tracker.upload_user_db(st.session_state.username)
427
- st.rerun()
428
- else: st.warning("Local Cache Empty. Click 'Resync' above if you have data in Pinecone.")
429
 
430
  # === TAB 3: QUIZ MODE ===
431
  with tab3:
432
  st.header("⚓ Qualification Board Simulator")
433
 
434
- # 1. MODE SELECTION & RESET LOGIC
435
- col_mode, col_streak = st.columns([3, 1])
436
- with col_mode:
437
- quiz_mode = st.radio("Select Quiz Mode:", ["⚡ Acronym Lightning Round", "📖 Document Deep Dive"], horizontal=True)
438
 
439
- # New: Focus Topic Input
440
- if "Document" in quiz_mode:
441
- focus_topic = st.text_input("🎯 Focus Topic (Optional)", placeholder="e.g., PPBE, Shipyards, Radar...", help="Leave empty for random questions.")
442
- else:
443
- focus_topic = None
444
 
445
  if "last_quiz_mode" not in st.session_state: st.session_state.last_quiz_mode = quiz_mode
446
  if "quiz_trigger" not in st.session_state: st.session_state.quiz_trigger = False
447
-
448
  if st.session_state.last_quiz_mode != quiz_mode:
449
- st.session_state.quiz_state["active"] = False
450
- st.session_state.quiz_state["question_data"] = None
451
- st.session_state.quiz_state["feedback"] = None
452
- st.session_state.quiz_state["generated_question_text"] = ""
453
- st.session_state.last_quiz_mode = quiz_mode
454
- st.rerun()
455
 
456
- quiz = QuizEngine()
457
- qs = st.session_state.quiz_state
458
-
459
- with col_streak:
460
- st.metric("Streak", qs["streak"])
461
- if st.button("Reset"): qs["streak"] = 0
462
-
463
- # New: Study Guide Download
464
  if st.session_state.quiz_history:
465
- with st.expander(f"📚 Review Study Guide ({len(st.session_state.quiz_history)} items)"):
466
- st.download_button(
467
- "📥 Download Markdown",
468
- generate_study_guide_md(st.session_state.quiz_history),
469
- f"StudyGuide_{datetime.now().strftime('%Y%m%d')}.md"
470
- )
471
-
472
  st.divider()
473
 
474
  def generate_question():
475
- with st.spinner("Consulting the Board..."):
 
476
  if "Acronym" in quiz_mode:
477
  q_data = quiz.get_random_acronym()
478
- if q_data:
479
- qs["active"] = True; qs["question_data"] = q_data; qs["feedback"] = None; qs["generated_question_text"] = q_data["question"]
480
- else: st.error("No acronyms found! Run the extractor first.")
481
  else:
482
- valid_question_found = False
483
- attempts = 0
484
- last_error = None
485
-
486
  while not valid_question_found and attempts < 5:
487
  attempts += 1
488
  q_ctx = quiz.get_document_context(st.session_state.username, topic_filter=focus_topic)
489
-
490
- # ERROR HANDLING SWITCH
491
- if q_ctx and "error" in q_ctx:
492
- last_error = q_ctx["error"]
493
- break # Stop trying, we know why it failed (e.g., Topic not found)
494
-
495
  if q_ctx:
496
  prompt = quiz.construct_question_generation_prompt(q_ctx["context_text"])
497
- question_text, usage = query_model_universal([{"role": "user", "content": prompt}], 300, model_choice, st.session_state.get("user_openai_key"))
 
498
 
 
499
  if "UNABLE" not in question_text and len(question_text) > 10:
500
- valid_question_found = True; qs["active"] = True; qs["question_data"] = q_ctx; qs["generated_question_text"] = question_text; qs["feedback"] = None
501
-
502
  if not valid_question_found:
503
- # SPECIFIC USER FEEDBACK
504
- if last_error == "topic_not_found":
505
- st.warning(f"Could not find any documents containing the topic: **'{focus_topic}'**. \n\nCheck your spelling or try a broader term.")
506
- elif focus_topic:
507
- st.warning(f"Found documents with '{focus_topic}', but the AI struggled to form a question. Try again or check the document content.")
508
- else:
509
- st.warning("Could not generate a question. Your 'Knowledge & Tools' local cache might be empty or corrupted. \n\n**Try clicking '🔄 Resync from Pinecone' in Tab 2.**")
510
-
511
- if st.session_state.quiz_trigger:
512
- st.session_state.quiz_trigger = False
513
- generate_question()
514
- st.rerun()
515
 
 
516
  if not qs["active"]:
517
- if st.button("🚀 Generate New Question", type="primary"):
518
- generate_question()
519
- st.rerun()
520
 
521
  if qs["active"]:
522
  st.markdown(f"### {qs['generated_question_text']}")
523
  if "document" in qs.get("question_data", {}).get("type", ""): st.caption(f"Source: *{qs['question_data']['source_file']}*")
524
-
525
  with st.form(key="quiz_response"):
526
- user_ans = st.text_area("Your Answer:")
527
- sub = st.form_submit_button("Submit Answer")
528
-
529
  if sub and user_ans:
530
- with st.spinner("Consulting the Knowledge Base & Grading..."):
531
  data = qs["question_data"]
532
-
533
- # Grading Logic Branch
534
  if data["type"] == "acronym":
535
- prompt = quiz.construct_acronym_grading_prompt(
536
- data["term"], data["correct_definition"], user_ans
537
- )
538
- # For acronyms, the context is just the definition
539
  final_context_for_history = data["correct_definition"]
540
-
541
  else:
542
- # --- RAG ENHANCEMENT START ---
543
- # 1. Start with the original seed text
544
- combined_context = f"--- PRIMARY SOURCE (SEED) ---\n{data['context_text']}\n\n"
545
-
546
- # 2. Search Pinecone for 5 more relevant chunks using the generated question
547
- # We check if index/model are active first
548
  if st.session_state.active_index and st.session_state.get("active_embed_model"):
549
  try:
550
- # We search for K=10 and Rerank to Top 5 (or just take Top 5 if no reranker)
551
- # Using the helper function from rag_engine
552
  related_docs = rag_engine.search_knowledge_base(
553
  query=qs["generated_question_text"],
554
  username=st.session_state.username,
555
  index_name=st.session_state.active_index,
556
  embed_model_name=st.session_state.active_embed_model,
557
- k=15, # Fetch a broad net
558
- final_k=5 # Narrow down to Top 5 most relevant
559
  )
560
-
561
  if related_docs:
562
- combined_context += "--- RELATED DOCUMENTATION (RETRIEVED) ---\n"
563
- for i, doc in enumerate(related_docs):
564
- combined_context += f"[Source {i+1}]: {doc.page_content}\n\n"
565
- except Exception as e:
566
- # If search fails, we just proceed with the primary source
567
- print(f"Grading Search Failed: {e}")
568
-
569
- # 3. Construct the Prompt with the super-context
570
- prompt = quiz.construct_grading_prompt(
571
- qs["generated_question_text"], user_ans, combined_context
572
- )
573
 
574
- # Save this rich context for the Study Guide
575
  final_context_for_history = combined_context
576
- # --- RAG ENHANCEMENT END ---
 
577
 
578
- # Call LLM
579
  msgs = [{"role": "user", "content": prompt}]
580
- grade, _ = query_model_universal(
581
- msgs, 1000, model_choice, st.session_state.get("user_openai_key")
582
- )
583
-
584
  qs["feedback"] = grade
585
-
586
- # Update Streak
587
  is_pass = "PASS" in grade
588
  if is_pass: qs["streak"] += 1
589
  elif "FAIL" in grade: qs["streak"] = 0
590
-
591
- # Save to History (Using the enhanced context!)
592
- st.session_state.quiz_history.append({
593
- "question": qs["generated_question_text"],
594
- "user_answer": user_ans,
595
- "grade": "PASS" if is_pass else "FAIL",
596
- "context": final_context_for_history
597
- })
598
-
599
  st.rerun()
600
 
601
  if qs["feedback"]:
@@ -603,15 +454,11 @@ with tab3:
603
  if "PASS" in qs["feedback"]: st.success("✅ CORRECT")
604
  else:
605
  if "FAIL" in qs["feedback"]: st.error("❌ INCORRECT")
606
- else: st.warning("⚠️ PARTIAL / COMMENTARY")
607
  st.markdown(qs["feedback"])
608
-
609
  data = qs["question_data"]
610
- if data["type"] == "acronym": st.info(f"**Official Definition:** {data['correct_definition']}")
611
  elif data["type"] == "document":
612
- with st.expander("Show Source Text (Answer Key)"): st.info(data["context_text"])
613
-
614
  if st.button("Next Question ➡️"):
615
- st.session_state.quiz_trigger = True
616
- qs["active"] = False; qs["question_data"] = None; qs["feedback"] = None
617
- st.rerun()
 
9
  import tracker
10
  import rag_engine
11
  import doc_loader
12
+ # NEW IMPORT: Modular Admin
13
+ import modules.admin_panel as admin_panel
14
+
15
  from openai import OpenAI
16
  from datetime import datetime
17
  from test_integration import run_tests
 
30
 
31
  if "quiz_state" not in st.session_state:
32
  st.session_state.quiz_state = {
33
+ "active": False, "question_data": None, "user_answer": "",
34
+ "feedback": None, "streak": 0, "generated_question_text": ""
 
 
 
 
35
  }
36
 
37
+ if "quiz_history" not in st.session_state: st.session_state.quiz_history = []
38
+ if "active_index" not in st.session_state: st.session_state.active_index = None
 
39
 
40
+ # NEW: Debug State Variables
41
+ if "last_prompt_sent" not in st.session_state: st.session_state.last_prompt_sent = ""
42
+ if "last_context_used" not in st.session_state: st.session_state.last_context_used = ""
43
 
44
+ # --- HELPER FUNCTIONS ---
45
  class OutlineProcessor:
46
  """Parses text outlines for the Flattener tool."""
47
+ def __init__(self, file_content): self.raw_lines = file_content.split('\n')
48
+ def _is_list_item(self, line): return bool(re.match(r"^\s*(\d+\.|[a-zA-Z]\.|-|\*)\s+", line))
 
 
 
 
 
49
  def _merge_multiline_items(self):
50
  merged_lines = []
51
  for line in self.raw_lines:
52
  stripped = line.strip()
53
  if not stripped: continue
54
+ if not merged_lines: merged_lines.append(line); continue
55
+ if not self._is_list_item(line): merged_lines[-1] = merged_lines[-1].rstrip() + " " + stripped
56
+ else: merged_lines.append(line)
 
 
 
 
57
  return merged_lines
 
58
  def parse(self):
59
  clean_lines = self._merge_multiline_items()
60
+ stack = []; results = []
 
61
  for line in clean_lines:
62
  stripped = line.strip()
63
  indent = len(line) - len(line.lstrip())
64
+ while stack and stack[-1]['indent'] >= indent: stack.pop()
 
65
  stack.append({'indent': indent, 'text': stripped})
66
+ context_str = " > ".join([item['text'] for item in stack[:-1]]) if len(stack) > 1 else "ROOT"
 
 
 
67
  results.append({"context": context_str, "target": stripped})
68
  return results
69
 
 
70
  def query_model_universal(messages, max_tokens, model_choice, user_key=None):
71
  """Unified router for both Chat and Tools."""
72
+ # CAPTURE FOR DEBUGGING
73
+ # We grab the last user message as the "Prompt"
74
+ if messages and messages[-1]['role'] == 'user':
75
+ st.session_state.last_prompt_sent = messages[-1]['content']
76
+
77
  if "GPT-4o" in model_choice:
78
  key = user_key if user_key else OPENAI_KEY
79
  if not key: return "[Error: No OpenAI API Key]", None
 
80
  client = OpenAI(api_key=key)
81
  try:
82
+ resp = client.chat.completions.create(model="gpt-4o", max_tokens=max_tokens, messages=messages, temperature=0.3)
 
 
83
  usage = {"input": resp.usage.prompt_tokens, "output": resp.usage.completion_tokens}
84
  return resp.choices[0].message.content, usage
85
+ except Exception as e: return f"[OpenAI Error: {e}]", None
 
86
  else:
87
+ model_map = {"Granite 4 (IBM)": "granite4:latest", "Llama 3.2 (Meta)": "llama3.2:latest", "Gemma 3 (Google)": "gemma3:latest"}
 
 
 
 
88
  tech_name = model_map.get(model_choice)
89
  if not tech_name: return "[Error: Model Map Failed]", None
 
90
  url = f"{API_URL_ROOT}/generate"
91
+ hist = ""; sys_msg = "You are a helpful assistant."
 
 
92
  for m in messages:
93
  if m['role']=='system': sys_msg = m['content']
94
  elif m['role']=='user': hist += f"User: {m['content']}\n"
95
  elif m['role']=='assistant': hist += f"Assistant: {m['content']}\n"
96
  hist += "Assistant: "
 
97
  try:
98
  r = requests.post(url, json={"text": hist, "persona": sys_msg, "max_tokens": max_tokens, "model": tech_name}, timeout=600)
99
  if r.status_code == 200:
100
  d = r.json()
101
  return d.get("response", ""), d.get("usage", {"input":0,"output":0})
102
  return f"[Local Error {r.status_code}]", None
103
+ except Exception as e: return f"[Conn Error: {e}]", None
 
104
 
105
  def update_sidebar_metrics():
106
  if metric_placeholder:
 
109
  metric_placeholder.metric("My Tokens Today", u_stats["input"] + u_stats["output"])
110
 
111
  def generate_study_guide_md(history):
112
+ md = "# Study Guide\n\nGenerated: " + datetime.now().strftime('%Y-%m-%d %H:%M') + "\n\n"
 
 
113
  for item in history:
114
+ md += f"## Q: {item['question']}\n**Your Answer:** {item['user_answer']}\n\n**Grade:** {item['grade']}\n\n**Context/Correct Info:**\n> {item['context']}\n\n---\n\n"
 
 
 
 
115
  return md
116
 
117
  # --- LOGIN ---
 
128
  with register_tab:
129
  st.header("Create Account")
130
  with st.form("reg_form"):
131
+ new_user = st.text_input("Username"); new_name = st.text_input("Display Name")
132
+ new_email = st.text_input("Email"); new_pwd = st.text_input("Password", type="password")
 
 
133
  invite = st.text_input("Invitation Passcode")
134
  if st.form_submit_button("Register"):
135
  success, msg = tracker.register_user(new_email, new_user, new_name, new_pwd, invite)
 
144
  st.write(f"Welcome, **{st.session_state.name}**")
145
  st.header("📊 Usage Tracker")
146
  metric_placeholder = st.empty()
147
+
148
+ # NEW: Modular Admin Integration
149
  if "admin" in st.session_state.roles:
150
+ admin_panel.render_admin_sidebar()
151
+
 
 
 
 
152
  st.divider()
 
153
  st.header("🌲 Pinecone Settings")
154
  pc_key = os.getenv("PINECONE_API_KEY")
155
  if pc_key:
 
157
  indexes = pm.list_indexes()
158
  selected_index = st.selectbox("Active Index", indexes)
159
  st.session_state.active_index = selected_index
 
 
160
  if selected_index:
 
161
  current_model = st.session_state.get("active_embed_model", "sentence-transformers/all-MiniLM-L6-v2")
162
  try:
163
  emb_fn = rag_engine.get_embedding_func(current_model)
164
  test_vec = emb_fn.embed_query("test")
165
  active_model_dim = len(test_vec)
166
+ if pm.check_dimension_compatibility(selected_index, active_model_dim): st.caption(f"✅ Compatible ({active_model_dim}d)")
167
+ else: st.error(f"❌ Mismatch! Model: {active_model_dim}d")
168
+ except Exception as e: st.caption(f"⚠️ Check failed: {e}")
 
 
 
 
 
169
  with st.expander("Create New Index"):
170
  new_idx_name = st.text_input("Index Name")
171
+ new_idx_dim = st.selectbox("Dimension", [384, 768, 1024, 1536, 3072], index=0)
172
  if st.button("Create"):
173
+ with st.spinner("Provisioning..."):
174
  ok, msg = pm.create_index(new_idx_name, dimension=new_idx_dim)
175
+ if ok: st.success(msg); time.sleep(2); st.rerun()
 
 
 
176
  else: st.error(msg)
177
+ else: st.warning("No Pinecone Key")
 
178
 
179
  st.header("🧠 Intelligence")
180
+ st.subheader("1. Embeddings")
181
  embed_options = {
182
  "Standard (All-MiniLM, 384d)": "sentence-transformers/all-MiniLM-L6-v2",
183
  "High-Perf (MPNet, 768d)": "sentence-transformers/all-mpnet-base-v2",
 
186
  embed_choice_label = st.selectbox("Select Embedding Model", list(embed_options.keys()))
187
  st.session_state.active_embed_model = embed_options[embed_choice_label]
188
 
189
+ st.subheader("2. Chat Model")
190
+ model_map = {"Granite 4 (IBM)": "granite4:latest", "Llama 3.2 (Meta)": "llama3.2:latest", "Gemma 3 (Google)": "gemma3:latest"}
 
 
 
 
191
  opts = list(model_map.keys())
 
192
  is_admin = "admin" in st.session_state.roles
193
  user_key = None
194
  if not is_admin:
195
+ user_key = st.text_input("Unlock GPT-4o", type="password")
196
+ st.session_state.user_openai_key = user_key if user_key else None
 
 
 
197
  else: st.session_state.user_openai_key = None
198
+ if is_admin or st.session_state.get("user_openai_key"): opts.append("GPT-4o (Omni)")
199
+ model_choice = st.radio("Select Model:", opts, key="model_selector_radio")
 
 
200
  st.info(f"Connected to: **{model_choice}**")
201
  st.divider()
202
  if st.session_state.authenticator: st.session_state.authenticator.logout(location='sidebar')
 
211
  with tab1:
212
  st.header("Discussion & Analysis")
213
  if "messages" not in st.session_state: st.session_state.messages = []
214
+
215
+ # RENDER DEBUG OVERLAY (If enabled in Admin)
216
+ admin_panel.render_debug_overlay("Chat Tab")
217
+
218
  c1, c2 = st.columns([3, 1])
219
  with c1: st.caption(f"Active Model: **{st.session_state.get('model_selector_radio', 'Granite')}**")
220
  with c2: use_rag = st.toggle("Enable Knowledge Base", value=False)
 
225
  with st.chat_message("user"): st.markdown(prompt)
226
  context_txt = ""
227
  sys_p = "You are a helpful AI assistant."
228
+ st.session_state.last_context_used = "" # Reset context debug
229
+
230
  if use_rag:
231
  if not st.session_state.active_index: st.error("⚠️ Please select an Active Index in the sidebar first.")
232
  else:
 
238
  embed_model_name=st.session_state.active_embed_model
239
  )
240
  if docs:
241
+ sys_p = "You are a Navy Document Analyst. Answer based PRIMARILY on the Context."
 
 
 
 
 
 
242
  for i, d in enumerate(docs):
243
  src = d.metadata.get('source', 'Unknown')
244
  context_txt += f"<document index='{i+1}' source='{src}'>\n{d.page_content}\n</document>\n"
245
+ # Debug Capture
246
+ st.session_state.last_context_used = context_txt
247
  if context_txt:
248
  final_prompt = f"User Question: {prompt}\n\n<context>\n{context_txt}\n</context>\n\nInstruction: Answer using the context above."
249
  else: final_prompt = prompt
250
+
251
  with st.chat_message("assistant"):
252
  with st.spinner("Thinking..."):
253
  hist = [{"role":"system", "content":sys_p}] + st.session_state.messages[-6:-1] + [{"role":"user", "content":final_prompt}]
 
265
  with tab2:
266
  st.header("Document Processor")
267
  c1, c2 = st.columns([1, 1])
268
+ with c1: uploaded_file = st.file_uploader("Upload File", type=["pdf", "docx", "pptx", "txt", "md"])
269
  with c2:
270
+ use_vision = st.toggle("👁️ Enable Vision Mode")
271
+ if use_vision and "GPT-4o" not in opts: st.warning("Vision requires OpenAI.")
 
272
  if uploaded_file:
273
  temp_path = rag_engine.save_uploaded_file(uploaded_file, st.session_state.username)
274
  col_a, col_b, col_c = st.columns(3)
275
  with col_a:
276
+ chunk_strategy = st.selectbox("Chunking Strategy", ["paragraph", "token"])
277
+ if st.button("📥 Add to KB", type="primary"):
278
+ if not st.session_state.active_index: st.error("Select Index first.")
279
  else:
280
  with st.spinner("Ingesting..."):
281
+ ok, msg = rag_engine.ingest_file(temp_path, st.session_state.username, st.session_state.active_index, st.session_state.active_embed_model, chunk_strategy)
282
+ if ok: tracker.upload_user_db(st.session_state.username); st.success(msg)
 
 
 
 
 
 
 
 
283
  else: st.error(msg)
284
  with col_b:
285
  st.write(""); st.write("")
286
+ if st.button("📝 Summarize"):
287
+ with st.spinner("Summarizing..."):
288
  key = st.session_state.get("user_openai_key") or OPENAI_KEY
289
  class FileObj:
290
  def __init__(self, p, n): self.path=p; self.name=n
291
  def read(self):
292
  with open(self.path, "rb") as f: return f.read()
293
  raw = doc_loader.extract_text_from_file(FileObj(temp_path, uploaded_file.name), use_vision=use_vision, api_key=key)
294
+ prompt = f"Summarize:\n\n{raw[:20000]}"
295
  msgs = [{"role":"user", "content": prompt}]
296
  summ, usage = query_model_universal(msgs, 1000, model_choice, st.session_state.get("user_openai_key"))
297
+ st.subheader("Summary"); st.markdown(summ)
298
  with col_c:
299
  st.write(""); st.write("")
300
  if "flattened_result" not in st.session_state: st.session_state.flattened_result = None
301
+ if st.button("📄 Flatten"):
302
  with st.spinner("Flattening..."):
303
  key = st.session_state.get("user_openai_key") or OPENAI_KEY
304
  with open(temp_path, "rb") as f:
 
306
  def __init__(self, data, n): self.data=data; self.name=n
307
  def read(self): return self.data
308
  raw = doc_loader.extract_text_from_file(Wrapper(f.read(), uploaded_file.name), use_vision=use_vision, api_key=key)
309
+ proc = OutlineProcessor(raw); items = proc.parse()
310
+ out_txt = []; bar = st.progress(0)
 
 
311
  for i, item in enumerate(items):
312
  p = f"Context: {item['context']}\nTarget: {item['target']}\nRewrite as one sentence."
313
  m = [{"role":"user", "content": p}]
314
  res, _ = query_model_universal(m, 300, model_choice, st.session_state.get("user_openai_key"))
315
+ out_txt.append(res); bar.progress((i+1)/len(items))
 
316
  final_flattened_text = "\n".join(out_txt)
317
  st.session_state.flattened_result = {"text": final_flattened_text, "source": f"{uploaded_file.name}_flat"}
318
  st.rerun()
319
  if st.session_state.flattened_result:
320
  res = st.session_state.flattened_result
321
+ st.success("Complete!"); st.text_area("Result", res["text"], height=200)
322
+ if st.button("📥 Index Flat"):
323
+ if not st.session_state.active_index: st.error("Select Index.")
 
324
  else:
325
  with st.spinner("Indexing..."):
326
+ ok, msg = rag_engine.process_and_add_text(res["text"], res["source"], st.session_state.username, st.session_state.active_index)
327
+ if ok: tracker.upload_user_db(st.session_state.username); st.success(msg)
 
 
 
 
 
 
 
328
  else: st.error(msg)
329
  st.divider()
330
  st.subheader("Database Management")
331
+ c1, c2 = st.columns([2, 1])
332
+ with c1: st.info("Missing local files? Resync below.")
333
+ with c2:
334
  if st.button("🔄 Resync from Pinecone"):
335
+ if not st.session_state.active_index: st.error("Select Index.")
336
  else:
337
+ with st.spinner("Resyncing..."):
338
  ok, msg = rag_engine.rebuild_cache_from_pinecone(st.session_state.username, st.session_state.active_index)
339
  if ok: st.success(msg); time.sleep(1); st.rerun()
340
  else: st.error(msg)
 
341
  docs = rag_engine.list_documents(st.session_state.username)
342
  if docs:
343
  for d in docs:
344
  c1, c2 = st.columns([4,1])
345
+ c1.text(f"📄 {d['filename']}")
346
  if c2.button("🗑️", key=d['source']):
347
+ if not st.session_state.active_index: st.error("Select Index.")
348
  else:
349
  rag_engine.delete_document(st.session_state.username, d['source'], st.session_state.active_index)
350
+ tracker.upload_user_db(st.session_state.username); st.rerun()
351
+ else: st.warning("Cache Empty.")
 
352
 
353
  # === TAB 3: QUIZ MODE ===
354
  with tab3:
355
  st.header("⚓ Qualification Board Simulator")
356
 
357
+ # RENDER DEBUG OVERLAY (If enabled)
358
+ admin_panel.render_debug_overlay("Quiz Tab")
 
 
359
 
360
+ col_mode, col_streak = st.columns([3, 1])
361
+ with col_mode: quiz_mode = st.radio("Mode:", ["⚡ Acronym Lightning Round", "📖 Document Deep Dive"], horizontal=True)
362
+ if "Document" in quiz_mode: focus_topic = st.text_input("🎯 Focus Topic", placeholder="e.g., PPBE...", help="Leave empty for random.")
363
+ else: focus_topic = None
 
364
 
365
  if "last_quiz_mode" not in st.session_state: st.session_state.last_quiz_mode = quiz_mode
366
  if "quiz_trigger" not in st.session_state: st.session_state.quiz_trigger = False
 
367
  if st.session_state.last_quiz_mode != quiz_mode:
368
+ st.session_state.quiz_state["active"] = False; st.session_state.quiz_state["question_data"] = None; st.session_state.quiz_state["feedback"] = None; st.session_state.quiz_state["generated_question_text"] = ""
369
+ st.session_state.last_quiz_mode = quiz_mode; st.rerun()
 
 
 
 
370
 
371
+ quiz = QuizEngine(); qs = st.session_state.quiz_state
372
+ with col_streak: st.metric("Streak", qs["streak"]);
373
+ if st.button("Reset"): qs["streak"] = 0
 
 
 
 
 
374
  if st.session_state.quiz_history:
375
+ with st.expander(f"📚 Review Study Guide ({len(st.session_state.quiz_history)})"):
376
+ st.download_button("📥 Download Markdown", generate_study_guide_md(st.session_state.quiz_history), f"StudyGuide_{datetime.now().strftime('%Y%m%d')}.md")
 
 
 
 
 
377
  st.divider()
378
 
379
def generate_question():
    """Generate the next quiz question and stash it in quiz state.

    Two modes (driven by the `quiz_mode` radio in the enclosing tab):
      - Acronym: pull a random acronym card from the QuizEngine.
      - Document: sample document context (optionally filtered by
        `focus_topic`), ask the chat model to write a question from it,
        retrying up to 5 times until a usable question comes back.

    Side effects only — mutates `qs` (st.session_state.quiz_state) and
    `st.session_state.last_context_used` (admin debug overlay).
    """
    with st.spinner("Consulting Board..."):
        # Clear the debug-overlay context before building a new question.
        st.session_state.last_context_used = ""

        if "Acronym" in quiz_mode:
            q_data = quiz.get_random_acronym()
            if q_data:
                qs["active"] = True
                qs["question_data"] = q_data
                qs["feedback"] = None
                qs["generated_question_text"] = q_data["question"]
            else:
                st.error("No acronyms.")
            return

        # Document Deep Dive: retry loop — context sampling and/or the
        # model can fail, so bound attempts at 5.
        valid_question_found = False
        attempts = 0
        last_error = None
        while not valid_question_found and attempts < 5:
            attempts += 1
            q_ctx = quiz.get_document_context(
                st.session_state.username, topic_filter=focus_topic
            )
            # An explicit error from the engine (e.g. topic_not_found)
            # is terminal — no point retrying the same filter.
            if q_ctx and "error" in q_ctx:
                last_error = q_ctx["error"]
                break
            if q_ctx:
                prompt = quiz.construct_question_generation_prompt(q_ctx["context_text"])
                # Capture the raw context for the admin debug overlay.
                st.session_state.last_context_used = q_ctx["context_text"]

                question_text, usage = query_model_universal(
                    [{"role": "user", "content": prompt}],
                    300,
                    model_choice,
                    st.session_state.get("user_openai_key"),
                )
                # Accept only answers that look like real questions
                # (model signals refusal with "UNABLE").
                if "UNABLE" not in question_text and len(question_text) > 10:
                    valid_question_found = True
                    qs["active"] = True
                    qs["question_data"] = q_ctx
                    qs["generated_question_text"] = question_text
                    qs["feedback"] = None

        if not valid_question_found:
            if last_error == "topic_not_found":
                st.warning(f"Topic '{focus_topic}' not found.")
            elif focus_topic:
                st.warning(f"Found '{focus_topic}' but could not generate question.")
            else:
                st.warning("Could not generate question. Try Resync.")
 
 
 
 
 
 
 
 
 
404
 
405
+ if st.session_state.quiz_trigger: st.session_state.quiz_trigger = False; generate_question(); st.rerun()
406
  if not qs["active"]:
407
+ if st.button("🚀 New Question", type="primary"): generate_question(); st.rerun()
 
 
408
 
409
  if qs["active"]:
410
  st.markdown(f"### {qs['generated_question_text']}")
411
  if "document" in qs.get("question_data", {}).get("type", ""): st.caption(f"Source: *{qs['question_data']['source_file']}*")
 
412
  with st.form(key="quiz_response"):
413
+ user_ans = st.text_area("Answer:")
414
+ sub = st.form_submit_button("Submit")
 
415
  if sub and user_ans:
416
+ with st.spinner("Grading..."):
417
  data = qs["question_data"]
 
 
418
  if data["type"] == "acronym":
419
+ prompt = quiz.construct_acronym_grading_prompt(data["term"], data["correct_definition"], user_ans)
 
 
 
420
  final_context_for_history = data["correct_definition"]
 
421
  else:
422
+ combined_context = f"--- PRIMARY SOURCE ---\n{data['context_text']}\n\n"
423
+ # RERANKED SEARCH INJECTION
 
 
 
 
424
  if st.session_state.active_index and st.session_state.get("active_embed_model"):
425
  try:
 
 
426
  related_docs = rag_engine.search_knowledge_base(
427
  query=qs["generated_question_text"],
428
  username=st.session_state.username,
429
  index_name=st.session_state.active_index,
430
  embed_model_name=st.session_state.active_embed_model,
431
+ k=15, final_k=5 # Broad retrieval + Rerank
 
432
  )
 
433
  if related_docs:
434
+ combined_context += "--- RELATED ---\n"
435
+ for i, doc in enumerate(related_docs): combined_context += f"[Source {i+1}]: {doc.page_content}\n\n"
436
+ except Exception as e: print(f"Search failed: {e}")
 
 
 
 
 
 
 
 
437
 
438
+ prompt = quiz.construct_grading_prompt(qs["generated_question_text"], user_ans, combined_context)
439
  final_context_for_history = combined_context
440
+ # DEBUG CAPTURE FOR GRADING
441
+ st.session_state.last_context_used = combined_context
442
 
 
443
  msgs = [{"role": "user", "content": prompt}]
444
+ grade, _ = query_model_universal(msgs, 1000, model_choice, st.session_state.get("user_openai_key"))
 
 
 
445
  qs["feedback"] = grade
 
 
446
  is_pass = "PASS" in grade
447
  if is_pass: qs["streak"] += 1
448
  elif "FAIL" in grade: qs["streak"] = 0
449
+ st.session_state.quiz_history.append({"question": qs["generated_question_text"], "user_answer": user_ans, "grade": "PASS" if is_pass else "FAIL", "context": final_context_for_history})
 
 
 
 
 
 
 
 
450
  st.rerun()
451
 
452
  if qs["feedback"]:
 
454
  if "PASS" in qs["feedback"]: st.success("✅ CORRECT")
455
  else:
456
  if "FAIL" in qs["feedback"]: st.error("❌ INCORRECT")
457
+ else: st.warning("⚠️ PARTIAL")
458
  st.markdown(qs["feedback"])
 
459
  data = qs["question_data"]
460
+ if data["type"] == "acronym": st.info(f"**Definition:** {data['correct_definition']}")
461
  elif data["type"] == "document":
462
+ with st.expander("Show Answer Key"): st.info(data["context_text"])
 
463
  if st.button("Next Question ➡️"):
464
+ st.session_state.quiz_trigger = True; qs["active"]=False; qs["question_data"]=None; qs["feedback"]=None; st.rerun()