# AI_Toolkit/src/core/QuizEngine.py
# Author: NavyDevilDoc — commit ccd1e0f (verified)
import random
import os
import logging
from core.AcronymManager import AcronymManager
class QuizEngine:
    """Generates quiz content (acronym drills and document-based scenario
    questions) and builds the LLM prompts used to create and grade them.
    """

    def __init__(self, source_dir="source_documents"):
        """
        Args:
            source_dir: Root directory containing one sub-directory of
                source documents per user.
        """
        self.acronym_mgr = AcronymManager()
        self.source_dir = source_dir
        self.logger = logging.getLogger(__name__)

    # --- MODE 1: ACRONYMS ---
    def get_random_acronym(self):
        """Pick a random acronym from the manager's loaded set.

        Returns:
            dict with keys ``type``, ``term``, ``correct_definition`` and
            ``question``, or None when no acronyms are loaded.
        """
        if not self.acronym_mgr.acronyms:
            return None
        acronym = random.choice(list(self.acronym_mgr.acronyms.keys()))
        definition = self.acronym_mgr.acronyms[acronym]
        return {
            "type": "acronym",
            "term": acronym,
            "correct_definition": definition,
            "question": f"What does **{acronym}** stand for?",
        }

    # --- MODE 2: SCENARIO SIMULATOR (Updated) ---
    def get_document_context(self, username, topic_filter=None):
        """
        Fetches a LARGE context chunk (up to 4000 chars) to ensure continuity.

        Args:
            username: Sub-directory of ``source_dir`` to search for
                ``.txt`` / ``.md`` documents.
            topic_filter: Optional substring; when given, only chunks that
                contain it (case-insensitive) are eligible.

        Returns:
            dict with ``type``/``source_file``/``context_text`` on success,
            ``{"error": "topic_not_found"}`` when a topic filter never matched
            any readable document, or None when nothing usable was found.
        """
        user_dir = os.path.join(self.source_dir, username)
        if not os.path.exists(user_dir):
            return None
        files = [f for f in os.listdir(user_dir) if f.lower().endswith((".txt", ".md"))]
        if not files:
            return None

        # TIER 2 parameters: we grab 4000 chars instead of 1500 to get
        # "Before & After" context, stepping 2000 for 50% window overlap.
        window_size = 4000
        step_size = 2000
        topic_match_found = False  # did ANY file contain the topic?

        # Retry loop: pick a random file each attempt; random.choice already
        # gives a uniform pick, so no up-front shuffle is needed.
        for _attempt in range(20):
            selected_file = random.choice(files)
            try:
                file_path = os.path.join(user_dir, selected_file)
                with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
                    text = f.read()
                if len(text.strip()) < 100:
                    continue  # too little content to build a scenario from

                # TIER 1: Topic Filter — skip files that never mention it.
                if topic_filter:
                    if topic_filter.lower() not in text.lower():
                        continue
                    topic_match_found = True

                # TIER 2: Large-window extraction (the "Mega-Window").
                candidates = []
                if len(text) < window_size:
                    candidates.append(text)
                else:
                    # BUGFIX: the original used range(0, len - window, step),
                    # which yielded NO chunks for a file exactly window_size
                    # long and never sampled the final window_size chars of
                    # longer files. Use an inclusive stop and append a
                    # tail-aligned start when the last step falls short.
                    starts = list(range(0, len(text) - window_size + 1, step_size))
                    if starts[-1] + window_size < len(text):
                        starts.append(len(text) - window_size)
                    for i in starts:
                        chunk = text[i:i + window_size]
                        if len(chunk.strip()) < 200:
                            continue
                        if topic_filter and topic_filter.lower() not in chunk.lower():
                            continue
                        candidates.append(chunk)

                # Fallback: the file matched the topic but no window captured
                # it — force a grab centred on the first occurrence. ``find``
                # cannot return -1 here because the TIER 1 check passed.
                if not candidates and topic_filter and topic_match_found:
                    idx = text.lower().find(topic_filter.lower())
                    start = max(0, idx - 1000)
                    end = min(len(text), idx + 3000)
                    candidates.append(text[start:end])

                if not candidates:
                    continue
                return {
                    "type": "document",
                    "source_file": selected_file,
                    "context_text": random.choice(candidates),
                }
            except Exception as e:
                # Deliberate best-effort: an unreadable file should not abort
                # the whole search — log (lazy args) and try another pick.
                self.logger.error("Error fetching context: %s", e)
                continue

        if topic_filter and not topic_match_found:
            return {"error": "topic_not_found"}
        return None

    # --- PROMPTS ---
    def construct_scenario_prompt(self, context_text):
        """
        Generates a 'Board-Style' Scenario.
        Forces the model to output a Scenario AND a Hidden Solution.
        """
        return (
            f"Act as a Senior Navy Board Examiner.\n"
            f"Reference Material:\n'''{context_text}'''\n\n"
            f"TASK: \n"
            f"1. Identify a key technical concept in the text (e.g., Stability, Finance, Contracting).\n"
            f"2. Create a REALISTIC SCENARIO based on this concept. Do not ask 'What is X?'. Instead, describe a situation (e.g., 'You are the DCA...', 'A contractor submits a bid...') and ask for the candidate's assessment.\n"
            f"3. Create the OFFICIAL SOLUTION explaining the 'Why' behind the answer.\n\n"
            f"STRICT OUTPUT FORMAT:\n"
            f"SCENARIO: [Your scenario text here]\n"
            f"SOLUTION: [The detailed answer key]"
        )

    def construct_scenario_grading_prompt(self, scenario, user_answer, solution, context_text):
        """
        Grades with the specific 'Board Assessment' persona requested.
        """
        return (
            f"Act as a Senior Navy Board Examiner grading a candidate's oral response.\n\n"
            f"--- THE SCENARIO ---\n{scenario}\n\n"
            f"--- OFFICIAL SOLUTION (For You) ---\n{solution}\n\n"
            f"--- REFERENCE TEXT ---\n{context_text}\n\n"
            f"--- CANDIDATE ANSWER ---\n{user_answer}\n\n"
            f"TASK: Grade the candidate.\n"
            f"1. Compare their answer to the Official Solution and Reference Text.\n"
            f"2. Look for technical precision (e.g., 'G rises' vs 'Weight moves').\n"
            f"3. Provide a numeric grade and a structured critique.\n\n"
            f"OUTPUT FORMAT:\n"
            f"**Grade:** [0-10]/10\n"
            f"**Critique:** [Your detailed feedback. Be firm but constructive. Highlight specifically what they missed (e.g., 'You identified the List, but failed to identify the Loll.').]"
        )

    # Legacy prompts (keep for safety if you switch modes)
    def construct_acronym_grading_prompt(self, term, correct_definition, user_answer):
        """Minimal PASS/FAIL grading prompt for acronym mode."""
        return f"Term: {term}\nDefinition: {correct_definition}\nAnswer: {user_answer}\nGrade PASS/FAIL."