NavyDevilDoc committed on
Commit
b72f944
·
verified ·
1 Parent(s): 9cba4cb

Update src/app.py

Browse files

Added quiz component

Files changed (1) hide show
  1. src/app.py +97 -1
src/app.py CHANGED
@@ -11,6 +11,7 @@ import doc_loader
11
  from openai import OpenAI
12
  from datetime import datetime
13
  from test_integration import run_tests
 
14
 
15
  # --- CONFIGURATION ---
16
  st.set_page_config(page_title="Navy AI Toolkit", page_icon="βš“", layout="wide")
@@ -22,6 +23,15 @@ OPENAI_KEY = os.getenv("OPENAI_API_KEY")
22
  if "roles" not in st.session_state:
23
  st.session_state.roles = []
24
 
 
 
 
 
 
 
 
 
 
25
  # --- FLATTENER LOGIC (Integrated) ---
26
  class OutlineProcessor:
27
  """Parses text outlines for the Flattener tool."""
@@ -239,7 +249,7 @@ update_sidebar_metrics()
239
 
240
  # --- MAIN APP ---
241
  st.title("βš“ Navy AI Toolkit")
242
- tab1, tab2 = st.tabs(["πŸ’¬ Chat Playground", "πŸ“‚ Knowledge & Tools"])
243
 
244
  # === TAB 1: CHAT ===
245
  with tab1:
@@ -456,6 +466,92 @@ with tab2:
456
  st.error(msg)
457
 
458
  st.divider()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
459
 
460
  # DB MANAGER
461
  st.subheader("Database Management")
 
11
  from openai import OpenAI
12
  from datetime import datetime
13
  from test_integration import run_tests
14
+ from core.QuizEngine import QuizEngine
15
 
16
  # --- CONFIGURATION ---
17
  st.set_page_config(page_title="Navy AI Toolkit", page_icon="βš“", layout="wide")
 
23
  if "roles" not in st.session_state:
24
  st.session_state.roles = []
25
 
26
+ if "quiz_state" not in st.session_state:
27
+ st.session_state.quiz_state = {
28
+ "active": False, # Is a question currently displayed?
29
+ "question_data": None, # The current acronym object
30
+ "user_answer": "", # What the user typed
31
+ "feedback": None, # The LLM's grading response
32
+ "streak": 0 # Fun gamification metric
33
+ }
34
+
35
  # --- FLATTENER LOGIC (Integrated) ---
36
  class OutlineProcessor:
37
  """Parses text outlines for the Flattener tool."""
 
249
 
250
  # --- MAIN APP ---
251
  st.title("βš“ Navy AI Toolkit")
252
+ tab1, tab2, tab3 = st.tabs(["πŸ’¬ Chat Playground", "πŸ“‚ Knowledge & Tools", "⚑ Quiz Mode"])
253
 
254
  # === TAB 1: CHAT ===
255
  with tab1:
 
466
  st.error(msg)
467
 
468
  st.divider()
469
+
470
+ # === TAB 3: QUIZ MODE ===
471
+ with tab3:
472
+ st.header("⚑ Acronym Lightning Round")
473
+ st.caption("Test your knowledge against your own Acronym Dictionary.")
474
+
475
+ # Initialize Engine
476
+ quiz = QuizEngine()
477
+ qs = st.session_state.quiz_state
478
+
479
+ # METRICS ROW
480
+ m1, m2, m3 = st.columns(3)
481
+ m1.metric("Streak", qs["streak"])
482
+
483
+ # 1. START / NEXT QUESTION BUTTON
484
+ if not qs["active"]:
485
+ if st.button("πŸš€ Start New Question", type="primary"):
486
+ q_data = quiz.get_random_acronym()
487
+ if q_data:
488
+ qs["active"] = True
489
+ qs["question_data"] = q_data
490
+ qs["feedback"] = None
491
+ qs["user_answer"] = ""
492
+ st.rerun()
493
+ else:
494
+ st.warning("Acronym Dictionary is empty! Please upload 'acronyms.json' or run the local extractor.")
495
+
496
+ # 2. QUESTION DISPLAY
497
+ if qs["active"] and qs["question_data"]:
498
+ st.divider()
499
+ st.markdown(f"### {qs['question_data']['question']}")
500
+
501
+ # User Input
502
+ # We use a form so hitting 'Enter' submits the answer
503
+ with st.form(key="quiz_form"):
504
+ user_input = st.text_input("Your Definition:", value=qs.get("user_answer_temp", ""))
505
+ submit_btn = st.form_submit_button("Submit Answer")
506
+
507
+ if submit_btn and user_input:
508
+ qs["user_answer"] = user_input
509
+
510
+ # 3. GRADING LOGIC
511
+ with st.spinner("The Board is deliberating..."):
512
+ # Construct Prompt
513
+ term = qs["question_data"]["term"]
514
+ correct_def = qs["question_data"]["correct_definition"]
515
+ prompt = quiz.construct_grading_prompt(term, correct_def, user_input)
516
+
517
+ # Call LLM (Reusing your existing query helper)
518
+ msgs = [{"role": "user", "content": prompt}]
519
+ grade, usage = query_model_universal(
520
+ msgs, 300, model_choice, st.session_state.get("user_openai_key")
521
+ )
522
+
523
+ qs["feedback"] = grade
524
+
525
+ # Simple Streak Logic (If LLM says PASS)
526
+ if "GRADE:** PASS" in grade or "GRADE:** Pass" in grade:
527
+ qs["streak"] += 1
528
+ elif "GRADE:** FAIL" in grade:
529
+ qs["streak"] = 0
530
+
531
+ # Log usage
532
+ if usage:
533
+ m_name = "GPT-4o" if "GPT-4o" in model_choice else model_choice.split()[0]
534
+ tracker.log_usage(m_name, usage["input"], usage["output"])
535
+ update_sidebar_metrics()
536
+
537
+ st.rerun()
538
+
539
+ # 4. FEEDBACK DISPLAY
540
+ if qs["feedback"]:
541
+ st.divider()
542
+ if "PASS" in qs["feedback"]:
543
+ st.success("βœ… CORRECT")
544
+ else:
545
+ st.error("❌ INCORRECT")
546
+
547
+ st.markdown(qs["feedback"])
548
+ st.info(f"**Official Definition:** {qs['question_data']['correct_definition']}")
549
+
550
+ if st.button("Next Question ➑️"):
551
+ qs["active"] = False
552
+ qs["question_data"] = None
553
+ qs["feedback"] = None
554
+ st.rerun()
555
 
556
  # DB MANAGER
557
  st.subheader("Database Management")