NavyDevilDoc committed on
Commit
ffc0162
·
verified ·
1 Parent(s): ccd1e0f

Update src/app.py

Browse files

modified tab 3 to fit with the new quiz mode

Files changed (1) hide show
  1. src/app.py +142 -55
src/app.py CHANGED
@@ -408,103 +408,190 @@ with tab2:
408
  with tab3:
409
  st.header("βš“ Qualification Board Simulator")
410
  admin_panel.render_debug_overlay("Quiz Tab")
 
411
  col_mode, col_streak = st.columns([3, 1])
412
- with col_mode: quiz_mode = st.radio("Mode:", ["⚑ Acronym Lightning Round", "πŸ“– Document Deep Dive"], horizontal=True)
413
- if "Document" in quiz_mode: focus_topic = st.text_input("🎯 Focus Topic", placeholder="e.g., PPBE...", help="Leave empty for random.")
414
- else: focus_topic = None
 
 
 
 
415
 
416
  if "last_quiz_mode" not in st.session_state: st.session_state.last_quiz_mode = quiz_mode
417
  if "quiz_trigger" not in st.session_state: st.session_state.quiz_trigger = False
 
418
  if st.session_state.last_quiz_mode != quiz_mode:
419
- st.session_state.quiz_state["active"] = False; st.session_state.quiz_state["question_data"] = None; st.session_state.quiz_state["feedback"] = None; st.session_state.quiz_state["generated_question_text"] = ""
420
- st.session_state.last_quiz_mode = quiz_mode; st.rerun()
 
 
 
 
421
 
422
- quiz = QuizEngine(); qs = st.session_state.quiz_state
423
- with col_streak: st.metric("Streak", qs["streak"]);
424
- if st.button("Reset"): qs["streak"] = 0
 
 
 
 
425
  if st.session_state.quiz_history:
426
  with st.expander(f"πŸ“š Review Study Guide ({len(st.session_state.quiz_history)})"):
427
- st.download_button("πŸ“₯ Download Markdown", generate_study_guide_md(st.session_state.quiz_history), f"StudyGuide_{datetime.now().strftime('%Y%m%d')}.md")
 
 
 
 
428
  st.divider()
429
 
430
  def generate_question():
431
  with st.spinner("Consulting Board..."):
432
  st.session_state.last_context_used = ""
 
433
  if "Acronym" in quiz_mode:
434
  q_data = quiz.get_random_acronym()
435
- if q_data: qs["active"]=True; qs["question_data"]=q_data; qs["feedback"]=None; qs["generated_question_text"]=q_data["question"]
436
- else: st.error("No acronyms.")
 
 
 
 
 
437
  else:
438
- valid_question_found = False; attempts = 0; last_error = None
 
 
 
439
  while not valid_question_found and attempts < 5:
440
  attempts += 1
441
  q_ctx = quiz.get_document_context(st.session_state.username, topic_filter=focus_topic)
442
- if q_ctx and "error" in q_ctx: last_error = q_ctx["error"]; break
 
 
 
 
443
  if q_ctx:
444
- prompt = quiz.construct_question_generation_prompt(q_ctx["context_text"])
 
445
  st.session_state.last_context_used = q_ctx["context_text"]
446
- question_text, usage = query_model_universal([{"role": "user", "content": prompt}], 300, model_choice, st.session_state.get("user_openai_key"))
447
- if "UNABLE" not in question_text and len(question_text) > 10:
448
- valid_question_found = True; qs["active"]=True; qs["question_data"]=q_ctx; qs["generated_question_text"]=question_text; qs["feedback"]=None
449
- if not valid_question_found:
450
- if last_error == "topic_not_found": st.warning(f"Topic '{focus_topic}' not found.")
451
- elif focus_topic: st.warning(f"Found '{focus_topic}' but could not generate question.")
452
- else: st.warning("Could not generate question. Try Resync.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
453
 
454
- if st.session_state.quiz_trigger: st.session_state.quiz_trigger = False; generate_question(); st.rerun()
 
 
 
 
 
 
 
 
 
 
 
 
455
  if not qs["active"]:
456
- if st.button("πŸš€ New Question", type="primary"): generate_question(); st.rerun()
 
 
457
 
458
  if qs["active"]:
459
  st.markdown(f"### {qs['generated_question_text']}")
460
- if "document" in qs.get("question_data", {}).get("type", ""): st.caption(f"Source: *{qs['question_data']['source_file']}*")
 
 
 
461
  with st.form(key="quiz_response"):
462
  user_ans = st.text_area("Answer:")
463
  sub = st.form_submit_button("Submit")
 
464
  if sub and user_ans:
465
- with st.spinner("Grading..."):
466
  data = qs["question_data"]
 
467
  if data["type"] == "acronym":
468
  prompt = quiz.construct_acronym_grading_prompt(data["term"], data["correct_definition"], user_ans)
469
  final_context_for_history = data["correct_definition"]
 
 
 
470
  else:
471
- combined_context = f"--- PRIMARY SOURCE ---\n{data['context_text']}\n\n"
472
- if st.session_state.active_index and st.session_state.get("active_embed_model"):
473
- try:
474
- related_docs = rag_engine.search_knowledge_base(
475
- query=qs["generated_question_text"],
476
- username=st.session_state.username,
477
- index_name=st.session_state.active_index,
478
- embed_model_name=st.session_state.active_embed_model,
479
- k=15, final_k=5
480
- )
481
- if related_docs:
482
- combined_context += "--- RELATED ---\n"
483
- for i, doc in enumerate(related_docs): combined_context += f"[Source {i+1}]: {doc.page_content}\n\n"
484
- except Exception as e: print(f"Search failed: {e}")
485
- prompt = quiz.construct_grading_prompt(qs["generated_question_text"], user_ans, combined_context)
486
- final_context_for_history = combined_context
487
- st.session_state.last_context_used = combined_context
488
-
489
- msgs = [{"role": "user", "content": prompt}]
490
- grade, _ = query_model_universal(msgs, 1000, model_choice, st.session_state.get("user_openai_key"))
491
- qs["feedback"] = grade
492
- is_pass = "PASS" in grade
493
- if is_pass: qs["streak"] += 1
494
- elif "FAIL" in grade: qs["streak"] = 0
495
- st.session_state.quiz_history.append({"question": qs["generated_question_text"], "user_answer": user_ans, "grade": "PASS" if is_pass else "FAIL", "context": final_context_for_history})
 
 
 
 
 
 
496
  st.rerun()
497
 
498
  if qs["feedback"]:
499
  st.divider()
500
- if "PASS" in qs["feedback"]: st.success("βœ… CORRECT")
 
501
  else:
502
  if "FAIL" in qs["feedback"]: st.error("❌ INCORRECT")
503
- else: st.warning("⚠️ PARTIAL")
 
504
  st.markdown(qs["feedback"])
 
505
  data = qs["question_data"]
506
- if data["type"] == "acronym": st.info(f"**Definition:** {data['correct_definition']}")
 
507
  elif data["type"] == "document":
508
- with st.expander("Show Answer Key"): st.info(data["context_text"])
 
 
509
  if st.button("Next Question ➑️"):
510
- st.session_state.quiz_trigger = True; qs["active"]=False; qs["question_data"]=None; qs["feedback"]=None; st.rerun()
 
 
 
 
 
408
with tab3:
    # Tab 3: quiz UI — lets the user drill acronyms or document-based
    # scenario questions, tracks a streak, and exports a study guide.
    st.header("βš“ Qualification Board Simulator")
    admin_panel.render_debug_overlay("Quiz Tab")

    col_mode, col_streak = st.columns([3, 1])
    with col_mode:
        quiz_mode = st.radio("Mode:", ["⚑ Acronym Lightning Round", "πŸ“– Document Deep Dive"], horizontal=True)

    # Optional topic filter only applies to Document mode; None means
    # "pick random context" downstream in generate_question().
    if "Document" in quiz_mode:
        focus_topic = st.text_input("🎯 Focus Topic", placeholder="e.g., PPBE...", help="Leave empty for random.")
    else:
        focus_topic = None

    # First-run defaults for the mode tracker and the deferred-generation flag.
    if "last_quiz_mode" not in st.session_state: st.session_state.last_quiz_mode = quiz_mode
    if "quiz_trigger" not in st.session_state: st.session_state.quiz_trigger = False

    # Switching modes invalidates the in-flight question: clear quiz state
    # and rerun so the UI rebuilds cleanly for the new mode.
    # NOTE(review): st.session_state.quiz_state is presumably initialized
    # earlier in the file — confirm before reordering this tab.
    if st.session_state.last_quiz_mode != quiz_mode:
        st.session_state.quiz_state["active"] = False
        st.session_state.quiz_state["question_data"] = None
        st.session_state.quiz_state["feedback"] = None
        st.session_state.quiz_state["generated_question_text"] = ""
        st.session_state.last_quiz_mode = quiz_mode
        st.rerun()

    quiz = QuizEngine()
    # Short alias for the quiz session dict used throughout this tab.
    qs = st.session_state.quiz_state

    with col_streak:
        st.metric("Streak", qs["streak"])
        if st.button("Reset"): qs["streak"] = 0

    # Offer a Markdown export of everything answered so far this session.
    if st.session_state.quiz_history:
        with st.expander(f"πŸ“š Review Study Guide ({len(st.session_state.quiz_history)})"):
            st.download_button(
                "πŸ“₯ Download Markdown",
                generate_study_guide_md(st.session_state.quiz_history),
                f"StudyGuide_{datetime.now().strftime('%Y%m%d')}.md"
            )
    st.divider()
447
 
448
  def generate_question():
449
  with st.spinner("Consulting Board..."):
450
  st.session_state.last_context_used = ""
451
+
452
  if "Acronym" in quiz_mode:
453
  q_data = quiz.get_random_acronym()
454
+ if q_data:
455
+ qs["active"]=True
456
+ qs["question_data"]=q_data
457
+ qs["feedback"]=None
458
+ qs["generated_question_text"]=q_data["question"]
459
+ else:
460
+ st.error("No acronyms.")
461
  else:
462
+ valid_question_found = False
463
+ attempts = 0
464
+ last_error = None
465
+
466
  while not valid_question_found and attempts < 5:
467
  attempts += 1
468
  q_ctx = quiz.get_document_context(st.session_state.username, topic_filter=focus_topic)
469
+
470
+ if q_ctx and "error" in q_ctx:
471
+ last_error = q_ctx["error"]
472
+ break
473
+
474
  if q_ctx:
475
+ # NEW: Use the Scenario Prompt
476
+ prompt = quiz.construct_scenario_prompt(q_ctx["context_text"])
477
  st.session_state.last_context_used = q_ctx["context_text"]
478
+
479
+ # Generate
480
+ response_text, usage = query_model_universal([{"role": "user", "content": prompt}], 600, model_choice, st.session_state.get("user_openai_key"))
481
+
482
+ # PARSE OUTPUT (Scenario vs Solution)
483
+ if "SCENARIO:" in response_text and "SOLUTION:" in response_text:
484
+ parts = response_text.split("SOLUTION:")
485
+ scenario_text = parts[0].replace("SCENARIO:", "").strip()
486
+ solution_text = parts[1].strip()
487
+
488
+ valid_question_found = True
489
+ qs["active"] = True
490
+ qs["question_data"] = q_ctx
491
+ qs["generated_question_text"] = scenario_text
492
+ qs["hidden_solution"] = solution_text
493
+ qs["feedback"] = None
494
+ else:
495
+ # Fallback if model ignores format
496
+ valid_question_found = True
497
+ qs["active"] = True
498
+ qs["question_data"] = q_ctx
499
+ qs["generated_question_text"] = response_text
500
+ qs["hidden_solution"] = "Refer to Source Text."
501
+ qs["feedback"] = None
502
 
503
+ if not valid_question_found:
504
+ if last_error == "topic_not_found":
505
+ st.warning(f"Topic '{focus_topic}' not found.")
506
+ elif focus_topic:
507
+ st.warning(f"Found '{focus_topic}' but could not generate question.")
508
+ else:
509
+ st.warning("Could not generate question. Try Resync.")
510
+
511
    # Deferred generation: "Next Question" sets quiz_trigger and reruns, so
    # the new question is produced at the top of the next script run.
    if st.session_state.quiz_trigger:
        st.session_state.quiz_trigger = False
        generate_question()
        st.rerun()

    if not qs["active"]:
        if st.button("πŸš€ New Question", type="primary"):
            generate_question()
            st.rerun()

    if qs["active"]:
        st.markdown(f"### {qs['generated_question_text']}")

        # Only document-backed questions carry a source file to cite.
        if "document" in qs.get("question_data", {}).get("type", ""):
            st.caption(f"Source: *{qs['question_data']['source_file']}*")

        with st.form(key="quiz_response"):
            user_ans = st.text_area("Answer:")
            sub = st.form_submit_button("Submit")

        if sub and user_ans:
            with st.spinner("Board is deliberating..."):
                data = qs["question_data"]

                if data["type"] == "acronym":
                    # Acronym grading: compare against the stored definition.
                    prompt = quiz.construct_acronym_grading_prompt(data["term"], data["correct_definition"], user_ans)
                    final_context_for_history = data["correct_definition"]
                    msgs = [{"role": "user", "content": prompt}]
                    grade, _ = query_model_universal(msgs, 1000, model_choice, st.session_state.get("user_openai_key"))
                    qs["feedback"] = grade
                else:
                    # Scenario grading: grade against the hidden solution plus
                    # the original source context.
                    scenario = qs["generated_question_text"]
                    solution = qs.get("hidden_solution", "")
                    context_ref = data["context_text"]

                    prompt = quiz.construct_scenario_grading_prompt(scenario, user_ans, solution, context_ref)
                    st.session_state.last_context_used = f"SCENARIO: {scenario}\n\nSOLUTION: {solution}\n\nREF: {context_ref}"

                    msgs = [{"role": "user", "content": prompt}]
                    grade, _ = query_model_universal(msgs, 1000, model_choice, st.session_state.get("user_openai_key"))
                    qs["feedback"] = grade

                # PASS/FAIL heuristic: substring-match the model's grade text.
                # Ambiguous grades deliberately default to PASS.
                # NOTE(review): grades like "3/10" that omit the word FAIL match
                # neither branch and fall through to the PASS default — confirm
                # this is intended before relying on streak accuracy.
                is_pass = False
                if "10/10" in grade or "9/10" in grade or "8/10" in grade or "7/10" in grade or "PASS" in grade:
                    is_pass = True
                    qs["streak"] += 1
                elif "FAIL" in grade or " 6/" in grade or " 5/" in grade:
                    qs["streak"] = 0
                else:
                    is_pass = True
                    qs["streak"] += 1

                # Record the attempt for the downloadable study guide.
                st.session_state.quiz_history.append({
                    "question": qs["generated_question_text"],
                    "user_answer": user_ans,
                    "grade": "PASS" if is_pass else "FAIL",  # Simplified for history list
                    "context": f"**Official Solution:** {qs.get('hidden_solution', '')}\n\n**Source Text:** {data.get('context_text', '')[:500]}..."
                })

            st.rerun()

    # Feedback panel: rendered on the rerun after grading.
    if qs["feedback"]:
        st.divider()
        if "PASS" in qs["feedback"] or "7/10" in qs["feedback"] or "8/10" in qs["feedback"] or "9/10" in qs["feedback"] or "10/10" in qs["feedback"]:
            st.success("βœ… CORRECT / PASSING")
        else:
            if "FAIL" in qs["feedback"]: st.error("❌ INCORRECT")
            else: st.warning("⚠️ PARTIAL / CRITIQUE")

        st.markdown(qs["feedback"])

        data = qs["question_data"]
        if data["type"] == "acronym":
            st.info(f"**Definition:** {data['correct_definition']}")
        elif data["type"] == "document":
            with st.expander("Show Official Solution"):
                st.info(qs.get("hidden_solution", "No solution generated."))

        # Clear state and flag the next run to generate a fresh question.
        if st.button("Next Question ➑️"):
            st.session_state.quiz_trigger = True
            qs["active"] = False
            qs["question_data"] = None
            qs["feedback"] = None
            st.rerun()