# quiz-generator-v3/learning_objective_generator/suggestion_improvement.py
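"""Evaluate and regenerate low-quality incorrect answer options.

Each incorrect answer option attached to a LearningObjective is checked
against a set of worked examples; options that fail the check are
regenerated, re-checked, and swapped in, up to a fixed number of attempts
per option.
"""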
from typing import List, Tuple
import os
import json
from openai import OpenAI
from models import LearningObjective
from prompts.incorrect_answers import INCORRECT_ANSWER_EXAMPLES_WITH_EXPLANATION
def _get_run_manager():
"""Get run manager if available, otherwise return None."""
try:
from ui.run_manager import get_run_manager
return get_run_manager()
    except Exception:
return None
def should_regenerate_individual_suggestion(client: OpenAI, model: str, temperature: float,
learning_objective: LearningObjective,
option: str,
file_contents: List[str]) -> Tuple[bool, str]:
"""
Check if an individual incorrect answer option needs regeneration.
Args:
client: OpenAI client
        model: Model name (currently unused; a lightweight model is hardcoded below)
        temperature: Temperature for generation (currently unused)
learning_objective: Learning objective to check
option: The individual option to check
file_contents: List of file contents with source tags
Returns:
Tuple of (needs_regeneration, reason)
"""
    # Pull out only the content for the source files this learning objective references
combined_content = ""
if hasattr(learning_objective, 'source_reference') and learning_objective.source_reference:
source_references = learning_objective.source_reference if isinstance(learning_objective.source_reference, list) else [learning_objective.source_reference]
for source_file in source_references:
for file_content in file_contents:
if f"<source file='{source_file}'>" in file_content:
if combined_content:
combined_content += "\n\n"
combined_content += file_content
break
# If no content found, use all content
if not combined_content:
combined_content = "\n\n".join(file_contents)
# Create a prompt to evaluate the individual suggestion
prompt = f"""
You are evaluating the quality of an incorrect answer suggestion for a learning objective. You will examine the incorrect answer option and determine whether it needs to be regenerated.
Learning Objective: {learning_objective.learning_objective}
Use the correct answer to help you make informed decisions:
Correct Answer: {learning_objective.correct_answer}
Incorrect Answer Option to Evaluate: {option}
Use the relevant content from the course content to help you make informed decisions:
COURSE CONTENT:
{combined_content}
Here are some examples of high-quality incorrect answer suggestions; use them to make an informed decision about whether this option needs to be regenerated:
<incorrect_answer_examples_with_explanation>
{INCORRECT_ANSWER_EXAMPLES_WITH_EXPLANATION}
</incorrect_answer_examples_with_explanation>
Based on the above examples, evaluate this incorrect answer suggestion.
Respond with TRUE if the incorrect answer suggestion needs regeneration, or FALSE if it is good quality.
If TRUE, briefly explain why regeneration is needed in this format: "true – reason for regeneration". Cite the examples (with their explanations) that informed your decision.
If FALSE, respond with just "false".
"""
    # Use a lightweight, hardcoded model for evaluation; the caller's model and temperature are not applied here
params = {
"model": "gpt-5-mini",
"messages": [
{"role": "system", "content": "You are an expert in educational assessment design and will determine if an incorrect answer option needs to be regenerated according to a set of quality standards, and examples of good and bad incorrect answer options."},
{"role": "user", "content": prompt}
]
}
try:
completion = client.chat.completions.create(**params)
response_text = completion.choices[0].message.content.strip().lower()
# Check if regeneration is needed and extract reason
needs_regeneration = response_text.startswith("true")
reason = ""
if needs_regeneration and "–" in response_text:
parts = response_text.split("–", 1)
if len(parts) > 1:
reason = "– " + parts[1].strip()
# Log the evaluation result
run_manager = _get_run_manager()
if needs_regeneration:
if run_manager:
run_manager.log(f"Option '{option[:50]}...' needs regeneration: True - {reason}", level="DEBUG")
else:
print(f"Option '{option[:50]}...' needs regeneration: True - {reason}")
else:
if run_manager:
run_manager.log(f"Option '{option[:50]}...' is good quality, keeping as is", level="DEBUG")
else:
print(f"Option '{option[:50]}...' is good quality, keeping as is")
return needs_regeneration, reason
except Exception as e:
run_manager = _get_run_manager()
if run_manager:
run_manager.log(f"Error evaluating option '{option[:50]}...': {e}", level="ERROR")
else:
print(f"Error evaluating option '{option[:50]}...': {e}")
# If there's an error, assume regeneration is needed with a generic reason
return True, "– error during evaluation"
def regenerate_individual_suggestion(client: OpenAI, model: str, temperature: float,
learning_objective: LearningObjective,
option_to_replace: str,
file_contents: List[str],
reason: str = "") -> str:
"""
Regenerate an individual incorrect answer option.
Args:
client: OpenAI client
        model: Model name (currently unused; a lightweight model is hardcoded below)
        temperature: Temperature for generation (currently unused)
learning_objective: Learning objective containing the option
option_to_replace: The incorrect answer option to replace
file_contents: List of file contents with source tags
reason: The reason for regeneration (optional)
Returns:
A new incorrect answer option
"""
run_manager = _get_run_manager()
if run_manager:
run_manager.log(f"Regenerating suggestion for learning objective {learning_objective.id}", level="DEBUG")
else:
print(f"Regenerating suggestion for learning objective {learning_objective.id}")
    # Pull out only the content for the source files this learning objective references
combined_content = ""
if hasattr(learning_objective, 'source_reference') and learning_objective.source_reference:
source_references = learning_objective.source_reference if isinstance(learning_objective.source_reference, list) else [learning_objective.source_reference]
for source_file in source_references:
for file_content in file_contents:
if f"<source file='{source_file}'>" in file_content:
if combined_content:
combined_content += "\n\n"
combined_content += file_content
break
# If no content found, use all content
if not combined_content:
combined_content = "\n\n".join(file_contents)
# If no reason provided, use a default one
if not reason:
reason = "– no reason provided"
# Create a prompt to regenerate the suggestion
prompt = f"""
You are generating a high-quality incorrect answer option for a learning objective.
Consider the learning objective and its correct answer when generating an incorrect answer option.
Learning Objective: {learning_objective.learning_objective}
Correct Answer: {learning_objective.correct_answer}
Current Incorrect Answer Options:
{json.dumps(learning_objective.incorrect_answer_options, indent=2)}
The following option needs improvement: {option_to_replace}
Consider the following reason for improvement in order to make the option better: {reason}
Use the relevant content from the course content to help you make informed decisions:
COURSE CONTENT:
{combined_content}
Refer to the examples with explanation below to generate a new incorrect answer option:
<incorrect_answer_examples_with_explanation>
{INCORRECT_ANSWER_EXAMPLES_WITH_EXPLANATION}
</incorrect_answer_examples_with_explanation>
Based on the above quality standards and examples, generate a new incorrect answer option.
Provide ONLY the new incorrect answer option, with no additional explanation.
"""
    # Use a lightweight, hardcoded model for regeneration; the caller's model and temperature are not applied here
params = {
"model": "gpt-5-mini",
"messages": [
{"role": "system", "content": "You are an expert in educational assessment design. You will generate a new incorrect answer option for a learning objective based on a set of quality standards, and examples of good and bad incorrect answer options."},
{"role": "user", "content": prompt}
]
}
try:
completion = client.chat.completions.create(**params)
new_suggestion = completion.choices[0].message.content.strip()
# Only create debug files if the suggestion actually changed
run_manager = _get_run_manager()
if new_suggestion != option_to_replace:
# Create debug directory if it doesn't exist
debug_dir = os.path.join("incorrect_suggestion_debug")
os.makedirs(debug_dir, exist_ok=True)
# Log the regeneration in the question-style format
suggestion_id = learning_objective.incorrect_answer_options.index(option_to_replace) if option_to_replace in learning_objective.incorrect_answer_options else "unknown"
# Format the log message in the same format as question regeneration
log_message = f"""Learning Objective ID: {learning_objective.id}
Learning Objective: {learning_objective.learning_objective}
REASON FOR REGENERATION:
{reason}
BEFORE:
Option Text: {option_to_replace}
Feedback: Incorrect answer representing a common misconception.
AFTER:
Option Text: {new_suggestion}
Feedback: Incorrect answer representing a common misconception.
"""
# Write to the log file
log_file = os.path.join(debug_dir, f"lo_{learning_objective.id}_suggestion_{suggestion_id}.txt")
with open(log_file, "w") as f:
f.write(log_message)
# Also log to run manager
if run_manager:
run_manager.log(f"Regenerated Option for Learning Objective {learning_objective.id}, Option {suggestion_id}", level="DEBUG")
run_manager.log(f"BEFORE: {option_to_replace[:80]}...", level="DEBUG")
run_manager.log(f"AFTER: {new_suggestion[:80]}...", level="DEBUG")
run_manager.log(f"Log saved to {log_file}", level="DEBUG")
else:
print(f"\n--- Regenerated Option for Learning Objective {learning_objective.id}, Option {suggestion_id} ---")
print(f"BEFORE: {option_to_replace}")
print(f"AFTER: {new_suggestion}")
print(f"Log saved to {log_file}")
        else:
            if run_manager:
                run_manager.log("Generated option is identical to the original; not saving a debug file", level="DEBUG")
            else:
                print("Generated option is identical to the original; not saving a debug file")
return new_suggestion
except Exception as e:
run_manager = _get_run_manager()
if run_manager:
run_manager.log(f"Error regenerating option: {e}", level="ERROR")
else:
print(f"Error regenerating option: {e}")
# If there's an error, return the original option
return option_to_replace
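# A minimal sketch of the check-then-fix flow for a single option (illustrative
# only; assumes a configured `client`, a populated `lo`, and tagged
# `file_contents`; note that both functions currently ignore model/temperature):
#
#     option = lo.incorrect_answer_options[0]
#     needs_regen, reason = should_regenerate_individual_suggestion(
#         client, "gpt-5-mini", 0.0, lo, option, file_contents)
#     if needs_regen:
#         option = regenerate_individual_suggestion(
#             client, "gpt-5-mini", 0.0, lo, option, file_contents, reason)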
def regenerate_incorrect_answers(client: OpenAI, model: str, temperature: float,
learning_objectives: List[LearningObjective],
file_contents: List[str]) -> List[LearningObjective]:
"""
Regenerate incorrect answer options for all learning objectives.
Args:
client: OpenAI client
model: Model name to use for regeneration
temperature: Temperature for generation
learning_objectives: List of learning objectives to improve
file_contents: List of file contents with source tags
Returns:
The same list of learning objectives with improved incorrect answer options
"""
run_manager = _get_run_manager()
if run_manager:
run_manager.log(f"Regenerating incorrect answers for {len(learning_objectives)} learning objectives", level="INFO")
else:
print(f"Regenerating incorrect answers for {len(learning_objectives)} learning objectives")
for i, lo in enumerate(learning_objectives):
if run_manager:
run_manager.log(f"Processing learning objective {i+1}/{len(learning_objectives)}: {lo.id}", level="INFO")
else:
print(f"Processing learning objective {i+1}/{len(learning_objectives)}: {lo.id}")
# Check each suggestion individually
if lo.incorrect_answer_options:
new_suggestions = []
for j, option in enumerate(lo.incorrect_answer_options):
# Check if this specific suggestion needs regeneration
needs_regeneration, reason = should_regenerate_individual_suggestion(client, model, temperature, lo, option, file_contents)
if needs_regeneration:
# Regenerate this specific suggestion with the reason
if run_manager:
run_manager.log(f"Regenerating option '{option[:50]}...' for learning objective {lo.id}", level="INFO")
else:
print(f"Regenerating option '{option[:50]}...' for learning objective {lo.id}")
# Initialize variables for the regeneration loop
current_option = option
max_iterations = 5
iteration = 0
# Loop until we get a good option or reach max iterations
while needs_regeneration and iteration < max_iterations:
iteration += 1
if run_manager:
run_manager.log(f" Regeneration attempt {iteration}/{max_iterations}", level="INFO")
else:
print(f" Regeneration attempt {iteration}/{max_iterations}")
# Regenerate the option
new_option = regenerate_individual_suggestion(client, model, temperature, lo, current_option, file_contents, reason)
# Check if the new option still needs regeneration
if iteration < max_iterations: # Skip check on last iteration to save API calls
needs_regeneration, new_reason = should_regenerate_individual_suggestion(client, model, temperature, lo, new_option, file_contents)
if needs_regeneration:
if run_manager:
run_manager.log(f" Regenerated option still needs improvement: {new_reason}", level="DEBUG")
else:
print(f" Regenerated option still needs improvement: {new_reason}")
current_option = new_option
reason = new_reason
else:
if run_manager:
run_manager.log(f" Regenerated option passes quality check on attempt {iteration}", level="INFO")
else:
print(f" Regenerated option passes quality check on attempt {iteration}")
else:
needs_regeneration = False
# Use the final regenerated option
new_suggestions.append(new_option)
else:
# Keep the original suggestion
if run_manager:
run_manager.log(f"Keeping original option '{option[:50]}...' for learning objective {lo.id}", level="INFO")
else:
print(f"Keeping original option '{option[:50]}...' for learning objective {lo.id}")
new_suggestions.append(option)
# Update the learning objective with the new suggestions
lo.incorrect_answer_options = new_suggestions
else:
# If there are no suggestions, generate completely new ones
if run_manager:
run_manager.log(f"No incorrect answer options found for learning objective {lo.id}, generating new ones", level="INFO")
else:
print(f"No incorrect answer options found for learning objective {lo.id}, generating new ones")
            # Generating fresh options would normally call back into enhancement.py,
            # but that would create a circular import, so we leave the list empty
            # and let the next generation cycle handle it.
            lo.incorrect_answer_options = []
return learning_objectives
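# ---------------------------------------------------------------------------
# Illustrative entry point: a minimal sketch, kept commented out on purpose.
# It assumes OPENAI_API_KEY is set and that LearningObjective accepts keyword
# arguments matching the fields used above; adjust both to the real setup.
# ---------------------------------------------------------------------------
# if __name__ == "__main__":
#     client = OpenAI()
#     lo = LearningObjective(
#         id="lo-1",  # hypothetical example values throughout
#         learning_objective="Explain what a DNS resolver does.",
#         correct_answer="It translates domain names into IP addresses.",
#         incorrect_answer_options=["It encrypts all web traffic."],
#         source_reference="networking.md",
#     )
#     file_contents = ["<source file='networking.md'>...course text...</source>"]
#     improved = regenerate_incorrect_answers(client, "gpt-5-mini", 0.2, [lo], file_contents)
#     print(improved[0].incorrect_answer_options)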