NavyDevilDoc committed on
Commit
fb2a6e3
·
verified ·
1 Parent(s): 593ecb0

Update src/core/QuizEngine.py

Browse files

added short answer-type questions

Files changed (1) hide show
  1. src/core/QuizEngine.py +79 -19
src/core/QuizEngine.py CHANGED
@@ -1,20 +1,19 @@
1
  import random
 
 
2
  from core.AcronymManager import AcronymManager
3
 
4
  class QuizEngine:
5
- def __init__(self):
6
- # reuse the existing manager to load the JSON
7
  self.acronym_mgr = AcronymManager()
 
 
8
 
 
9
  def get_random_acronym(self):
10
- """
11
- Fetches a random acronym-definition pair.
12
- Returns: dict or None (if empty)
13
- """
14
  if not self.acronym_mgr.acronyms:
15
  return None
16
 
17
- # Pick a random key
18
  acronym = random.choice(list(self.acronym_mgr.acronyms.keys()))
19
  definition = self.acronym_mgr.acronyms[acronym]
20
 
@@ -25,21 +24,82 @@ class QuizEngine:
25
  "question": f"What does **{acronym}** stand for?"
26
  }
27
 
28
- def construct_grading_prompt(self, term, correct_definition, user_answer):
 
29
  """
30
- Builds the prompt for the Board Examiner persona.
31
- We return the string here so app.py can send it to whichever LLM is active.
32
  """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  return (
34
- f"You are a strict US Navy Engineering Duty Officer Board Examiner.\n"
35
- f"I am a candidate. You asked me to define the acronym: {term}\n\n"
36
- f"The Official Definition is: {correct_definition}\n"
37
- f"My Answer was: {user_answer}\n\n"
38
- f"INSTRUCTIONS:\n"
39
- f"1. Grade my answer as PASS (Correct expansion) or FAIL (Incorrect expansion).\n"
40
- f"2. If I got the words right but missed the full context, give me a 'PASS with Comments'.\n"
41
- f"3. Keep your feedback short and military-professional.\n\n"
 
 
 
 
 
 
 
 
 
 
42
  f"OUTPUT FORMAT:\n"
43
  f"**GRADE:** [PASS/FAIL]\n"
44
- f"**CRITIQUE:** [Your brief feedback]"
 
 
 
 
 
 
 
 
 
 
45
  )
 
1
  import random
2
+ import os
3
+ import logging
4
  from core.AcronymManager import AcronymManager
5
 
6
  class QuizEngine:
7
+ def __init__(self, source_dir="source_documents"):
 
8
  self.acronym_mgr = AcronymManager()
9
+ self.source_dir = source_dir
10
+ self.logger = logging.getLogger(__name__)
11
 
12
+ # --- MODE 1: ACRONYMS ---
13
  def get_random_acronym(self):
 
 
 
 
14
  if not self.acronym_mgr.acronyms:
15
  return None
16
 
 
17
  acronym = random.choice(list(self.acronym_mgr.acronyms.keys()))
18
  definition = self.acronym_mgr.acronyms[acronym]
19
 
 
24
  "question": f"What does **{acronym}** stand for?"
25
  }
26
 
27
+ # --- MODE 2: DOCUMENTS (NEW) ---
28
+ def get_document_context(self, username):
29
  """
30
+ Picks a random file, reads it, and selects a random segment.
31
+ Returns the segment to be used as the 'Answer Key'.
32
  """
33
+ user_dir = os.path.join(self.source_dir, username)
34
+ if not os.path.exists(user_dir):
35
+ return None
36
+
37
+ # 1. Get list of text/md files
38
+ files = [f for f in os.listdir(user_dir) if f.lower().endswith(('.txt', '.md'))]
39
+ if not files:
40
+ return None
41
+
42
+ # 2. Pick a random file
43
+ selected_file = random.choice(files)
44
+ file_path = os.path.join(user_dir, selected_file)
45
+
46
+ try:
47
+ with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
48
+ text = f.read()
49
+
50
+ # 3. Split into rough paragraphs (double line break)
51
+ # Filter out short/empty paragraphs to avoid bad questions
52
+ paragraphs = [p.strip() for p in text.split('\n\n') if len(p.strip()) > 200]
53
+
54
+ if not paragraphs:
55
+ return None
56
+
57
+ # 4. Pick a random paragraph
58
+ selected_context = random.choice(paragraphs)
59
+
60
+ return {
61
+ "type": "document",
62
+ "source_file": selected_file,
63
+ "context_text": selected_context
64
+ }
65
+
66
+ except Exception as e:
67
+ self.logger.error(f"Error fetching quiz context: {e}")
68
+ return None
69
+
70
+ # --- PROMPT CONSTRUCTORS ---
71
+ def construct_question_generation_prompt(self, context_text):
72
+ """Asks the LLM to write the question for us."""
73
  return (
74
+ f"Act as a US Navy Engineering Duty Officer Board Examiner.\n"
75
+ f"Read the following technical excerpt from a Navy reference:\n\n"
76
+ f"'{context_text}'\n\n"
77
+ f"TASK: Generate a single, tough, short-answer board question based ONLY on this text.\n"
78
+ f"The question should test recall of specific details (e.g., 'What are the 3 documents required for...').\n"
79
+ f"OUTPUT: Just the question text. No preamble."
80
+ )
81
+
82
+ def construct_grading_prompt(self, question, answer, context_text):
83
+ """Grades the deep dive answer."""
84
+ return (
85
+ f"You are a Board Examiner.\n"
86
+ f"Reference Material: '{context_text}'\n\n"
87
+ f"Question: {question}\n"
88
+ f"Candidate Answer: {answer}\n\n"
89
+ f"TASK: Grade the answer based strictly on the Reference Material.\n"
90
+ f"1. If the answer is factually correct according to the text, grade PASS.\n"
91
+ f"2. If it misses key details mentioned in the text, grade FAIL or PASS with Comments.\n"
92
  f"OUTPUT FORMAT:\n"
93
  f"**GRADE:** [PASS/FAIL]\n"
94
+ f"**FEEDBACK:** [Brief correction or confirmation]"
95
+ )
96
+
97
+ def construct_acronym_grading_prompt(self, term, correct_definition, user_answer):
98
+ """Grades the acronym answer."""
99
+ return (
100
+ f"Term: {term}\n"
101
+ f"Official Definition: {correct_definition}\n"
102
+ f"User Answer: {user_answer}\n\n"
103
+ f"Grade as PASS (correct expansion) or FAIL. If close, PASS with comment.\n"
104
+ f"Output: **GRADE:** [Status]\n**FEEDBACK:** [Details]"
105
  )