import random
import os
import logging
from core.AcronymManager import AcronymManager
class QuizEngine:
    """Quiz generator with two modes: random acronym flash-questions and
    'board-style' scenario questions built from a user's source documents.

    Prompt-construction helpers return plain strings intended to be sent
    to an LLM; they do not call any model themselves.
    """

    def __init__(self, source_dir="source_documents"):
        self.acronym_mgr = AcronymManager()
        self.source_dir = source_dir
        self.logger = logging.getLogger(__name__)

    # --- MODE 1: ACRONYMS ---
    def get_random_acronym(self):
        """Return a random acronym-question dict, or None if no acronyms are loaded."""
        if not self.acronym_mgr.acronyms:
            return None
        acronym = random.choice(list(self.acronym_mgr.acronyms.keys()))
        definition = self.acronym_mgr.acronyms[acronym]
        return {
            "type": "acronym",
            "term": acronym,
            "correct_definition": definition,
            "question": f"What does **{acronym}** stand for?",
        }

    # --- MODE 2: SCENARIO SIMULATOR (Updated) ---
    def get_document_context(self, username, topic_filter=None):
        """
        Fetch a LARGE context chunk (4000 chars) from one of the user's
        documents to ensure continuity.

        Args:
            username: subdirectory of ``source_dir`` holding the user's files.
            topic_filter: optional substring; only files/chunks mentioning it
                (case-insensitively) are eligible.

        Returns:
            {"type": "document", "source_file": ..., "context_text": ...} on
            success; {"error": "topic_not_found"} when a topic_filter matched
            no file; None when no usable documents exist.
        """
        user_dir = os.path.join(self.source_dir, username)
        # isdir (not exists): a same-named regular file would crash listdir.
        if not os.path.isdir(user_dir):
            return None
        files = [f for f in os.listdir(user_dir) if f.lower().endswith((".txt", ".md"))]
        if not files:
            return None

        # Visit each file at most once, in random order.  (Previously this
        # sampled with replacement for 20 attempts, which could re-read the
        # same file repeatedly while never touching others.)
        random.shuffle(files)
        topic_match_found = False
        for selected_file in files:
            try:
                file_path = os.path.join(user_dir, selected_file)
                with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
                    text = f.read()
                if len(text.strip()) < 100:
                    continue  # too short to build a scenario from

                # TIER 1: Topic Filter — skip files that never mention the topic.
                if topic_filter:
                    if topic_filter.lower() not in text.lower():
                        continue
                    topic_match_found = True

                # TIER 2: Large Window Extraction (the "Mega-Window").
                # 4000 chars instead of 1500 captures "Before & After" context.
                candidates = self._extract_windows(
                    text, topic_filter, window_size=4000, step_size=2000
                )

                # Fallback: the topic is in this file but every window missed
                # it — force a grab centered on the first occurrence.
                if not candidates and topic_filter:
                    idx = text.lower().find(topic_filter.lower())
                    start = max(0, idx - 1000)
                    end = min(len(text), idx + 3000)
                    candidates.append(text[start:end])

                if not candidates:
                    continue
                return {
                    "type": "document",
                    "source_file": selected_file,
                    "context_text": random.choice(candidates),
                }
            except Exception as e:
                # Best-effort per-file: log and move on to the next document.
                self.logger.error(f"Error fetching context: {e}")
                continue

        if topic_filter and not topic_match_found:
            return {"error": "topic_not_found"}
        return None

    @staticmethod
    def _extract_windows(text, topic_filter, window_size, step_size):
        """Slide a window over `text`, keeping chunks that are substantial and
        (when filtering) mention the topic.

        BUGFIX: a plain ``range(0, len(text) - window_size, step_size)`` walk
        excluded the final window start (exclusive stop) plus any non-aligned
        tail, so the end of every document was unreachable.  The last window
        ``text[-window_size:]`` is now always considered.
        """
        if len(text) < window_size:
            return [text]
        last_start = len(text) - window_size
        starts = list(range(0, last_start + 1, step_size))
        if starts[-1] != last_start:
            starts.append(last_start)  # guarantee the document tail is covered
        candidates = []
        for i in starts:
            chunk = text[i : i + window_size]
            if len(chunk.strip()) < 200:
                continue
            if topic_filter and topic_filter.lower() not in chunk.lower():
                continue
            candidates.append(chunk)
        return candidates

    # --- PROMPTS ---
    def construct_scenario_prompt(self, context_text):
        """
        Generates a 'Board-Style' Scenario.
        Forces the model to output a Scenario AND a Hidden Solution.
        """
        return (
            f"Act as a Senior Navy Board Examiner.\n"
            f"Reference Material:\n'''{context_text}'''\n\n"
            f"TASK: \n"
            f"1. Identify a key technical concept in the text (e.g., Stability, Finance, Contracting).\n"
            f"2. Create a REALISTIC SCENARIO based on this concept. Do not ask 'What is X?'. Instead, describe a situation (e.g., 'You are the DCA...', 'A contractor submits a bid...') and ask for the candidate's assessment.\n"
            f"3. Create the OFFICIAL SOLUTION explaining the 'Why' behind the answer.\n\n"
            f"STRICT OUTPUT FORMAT:\n"
            f"SCENARIO: [Your scenario text here]\n"
            f"SOLUTION: [The detailed answer key]"
        )

    def construct_scenario_grading_prompt(self, scenario, user_answer, solution, context_text):
        """
        Grades with the specific 'Board Assessment' persona requested.
        """
        return (
            f"Act as a Senior Navy Board Examiner grading a candidate's oral response.\n\n"
            f"--- THE SCENARIO ---\n{scenario}\n\n"
            f"--- OFFICIAL SOLUTION (For You) ---\n{solution}\n\n"
            f"--- REFERENCE TEXT ---\n{context_text}\n\n"
            f"--- CANDIDATE ANSWER ---\n{user_answer}\n\n"
            f"TASK: Grade the candidate.\n"
            f"1. Compare their answer to the Official Solution and Reference Text.\n"
            f"2. Look for technical precision (e.g., 'G rises' vs 'Weight moves').\n"
            f"3. Provide a numeric grade and a structured critique.\n\n"
            f"OUTPUT FORMAT:\n"
            f"**Grade:** [0-10]/10\n"
            f"**Critique:** [Your detailed feedback. Be firm but constructive. Highlight specifically what they missed (e.g., 'You identified the List, but failed to identify the Loll.').]"
        )

    # Legacy prompts (keep for safety if you switch modes)
    def construct_acronym_grading_prompt(self, term, correct_definition, user_answer):
        return f"Term: {term}\nDefinition: {correct_definition}\nAnswer: {user_answer}\nGrade PASS/FAIL."