# lesson_plan_generator/src/gpt_method.py
# Author: raymondEDS — commit 81f3f6f ("adding bloom framework")
import google.generativeai as genai
from concurrent.futures import ThreadPoolExecutor
import threading
from collections import deque
import os
from dotenv import load_dotenv
from pathlib import Path
# Load environment variables from the project-root .env file.
# load_dotenv is a no-op when the file does not exist, so this is safe
# in deployments where the key is exported directly.
env_path = Path(__file__).parent.parent / '.env'
load_dotenv(dotenv_path=env_path)

# Configure the Google Generative AI client; fail fast with a clear error
# rather than letting the first API call die with an auth failure.
GOOGLE_API_KEY = os.getenv('Gemini_API_key')
if not GOOGLE_API_KEY:
    raise ValueError("Gemini_API_key environment variable is not set")
genai.configure(api_key=GOOGLE_API_KEY)
# Generation parameters shared by every request made through this module.
generation_config = {
    "temperature": 0.8,
    "top_p": 1,
    "top_k": 1,
    "max_output_tokens": 2048,
}

# Apply the same moderate blocking threshold to every harm category.
safety_settings = [
    {"category": category, "threshold": "BLOCK_MEDIUM_AND_ABOVE"}
    for category in (
        "HARM_CATEGORY_HARASSMENT",
        "HARM_CATEGORY_HATE_SPEECH",
        "HARM_CATEGORY_SEXUALLY_EXPLICIT",
        "HARM_CATEGORY_DANGEROUS_CONTENT",
    )
]

# Module-level Gemini model instance used by all helpers below.
model = genai.GenerativeModel(
    model_name="gemini-2.5-flash",
    generation_config=generation_config,
    safety_settings=safety_settings,
)
class History:
    """Bounded chat transcript stored as role/content dicts.

    NOTE(review): ``max_save_round`` bounds the number of *entries*, and each
    exchange stores two entries (user + model), so it effectively limits
    rounds to max_save_round // 2 — confirm this is intended.
    """

    def __init__(self, max_save_round):
        # deque with maxlen silently drops the oldest entries on overflow.
        self.max_save_round = max_save_round
        self.history = deque(maxlen=self.max_save_round)

    def get_from_list(self, lst):
        """Import prior turns from a request/response list.

        The final item is skipped — it is the in-flight request, which is
        saved separately once answered.
        """
        for item in lst[:-1]:
            if item['type'] == 'request':
                self.history.append({'role': 'user', 'content': item['content']})
            else:
                self.history.append({'role': 'model', 'content': item['content']})

    def save_history(self, role, content):
        """Append a single entry with the given role ('user' or 'model')."""
        self.history.append({'role': role, 'content': content})

    def clear_history(self):
        """Discard all stored entries."""
        self.history.clear()

    def get_histories(self, n=None):
        """Return the last *n* entries (all entries when n is None or too large)."""
        if n is None or n > len(self.history):
            n = len(self.history)
        # Guard n == 0 explicitly: list[-0:] would return the WHOLE list.
        return list(self.history)[-n:] if n else []

    def append_to_last_history(self, increment):
        """Append text to the most recent entry's content.

        Raises:
            IndexError: if there is no history yet.
        """
        if not self.history:
            raise IndexError("No history to append to.")
        # The dict is mutated in place; no write-back needed.
        self.history[-1]['content'] += increment
# Default number of history entries retained per conversation (see History).
history_rounds = 20
def get_answer(client, history, system_prompt, user_prompt, printing=True, save=True):
    """Stream a Gemini completion and return the full response text.

    The system and user prompts are concatenated into a single prompt
    (Gemini has no separate system role here). ``client`` is unused and
    kept only for interface compatibility.

    Args:
        client: unused, retained for signature compatibility.
        history: History instance updated when ``save`` is True.
        system_prompt: instruction text prepended to the prompt.
        user_prompt: the user's request.
        printing: echo chunks to stdout as they arrive.
        save: record the exchange in ``history``.

    Returns:
        The complete generated text.
    """
    full_prompt = f"{system_prompt}\n\n{user_prompt}"
    pieces = []
    for chunk in model.generate_content(full_prompt, stream=True):
        if chunk.text:
            pieces.append(chunk.text)
            if printing:
                print(chunk.text, end="")
    answer = "".join(pieces)
    if save:
        history.save_history('user', user_prompt)
        history.save_history('model', answer)
    return answer
def get_answer_generator(client, history, system_prompt, user_prompt, printing=True):
    """Yield response chunks from Gemini as they stream in.

    The user prompt and an empty model entry are saved to ``history``
    up front; each streamed chunk is then appended to that last entry,
    so the history stays current even if the caller stops early.

    Args:
        client: unused, retained for signature compatibility.
        history: History instance to record the exchange in.
        system_prompt: instruction text prepended to the prompt.
        user_prompt: the user's request.
        printing: echo chunks to stdout as they arrive.

    Yields:
        str: each non-empty text chunk from the stream.
    """
    full_prompt = f"{system_prompt}\n\n{user_prompt}"
    history.save_history('user', user_prompt)
    history.save_history('model', '')
    stream = model.generate_content(full_prompt, stream=True)
    for chunk in stream:
        text = chunk.text
        if not text:
            continue
        history.append_to_last_history(text)
        if printing:
            print(text, end='')
        yield text
def get_answer_single(client, system_prompt, user_prompt, printing=False):
    """Return one non-streaming Gemini completion for the combined prompts.

    Args:
        client: unused, retained for signature compatibility.
        system_prompt: instruction text prepended to the prompt.
        user_prompt: the user's request.
        printing: print the full answer once generated.

    Returns:
        The generated text.
    """
    response = model.generate_content(f"{system_prompt}\n\n{user_prompt}")
    answer = response.text
    if printing:
        print(answer)
    return answer
def get_answers_parallel(client, system_prompt, user_prompt, n=1, printing=False):
    """Generate *n* independent completions for the same prompt concurrently.

    Each worker calls get_answer_single; a lock keeps printed answers from
    interleaving. Results are returned in submission order.

    Args:
        client: passed through to get_answer_single (unused there).
        system_prompt: instruction text prepended to the prompt.
        user_prompt: the user's request.
        n: number of parallel completions (also the thread-pool size).
        printing: print each answer as it completes.

    Returns:
        list[str]: the n generated answers, in submission order.
    """
    print_lock = threading.Lock()

    def _worker():
        # Disable per-call printing; we serialize output ourselves below.
        result = get_answer_single(client, system_prompt, user_prompt, printing=False)
        if printing:
            with print_lock:
                print(result)
        return result

    with ThreadPoolExecutor(max_workers=n) as executor:
        futures = [executor.submit(_worker) for _ in range(n)]
        return [future.result() for future in futures]
def generate_lesson_plan(lesson_name, lesson_theme, grade_level, objectives, materials, duration, steps, assessment):
    """Assemble the provided components into a formatted lesson plan via Gemini.

    All arguments are interpolated verbatim into the prompt; ``duration``
    is expressed in minutes.

    Returns:
        The generated lesson-plan text.
    """
    return get_answer_single(
        None,
        """You are an expert educator. Create a comprehensive lesson plan using the provided components.
Format the output in a clear, professional manner with appropriate sections and bullet points.""",
        f"""Create a lesson plan with the following details:
Course Name: {lesson_name}
Theme: {lesson_theme}
Grade Level: {grade_level}
Learning Objectives:
{objectives}
Materials Needed:
{materials}
Duration: {duration} minutes
Lesson Steps:
{steps}
Assessment:
{assessment}
Please format this into a professional lesson plan with clear sections and bullet points.""",
    )
def generate_objectives(lesson_theme, lesson_name=None, lesson_type=None, existing_goals=None):
    """Generate Bloom's-Taxonomy learning objectives for a lesson theme.

    When ``existing_goals`` is provided (non-empty), the prompt asks the
    model to incorporate those goals; otherwise a worked example guides
    the output format.

    Returns:
        The generated objectives text (plain lines, no markdown).
    """
    system_prompt = """You are an expert educator. Generate clear, measurable learning objectives based on Bloom's Taxonomy
for the given lesson theme. Include objectives at different levels of Bloom's Taxonomy."""
    if not existing_goals:
        user_prompt = f"""I am preparing a lesson for {lesson_name} on {lesson_theme}, targeting {lesson_type} students who have not studied this course before.
Please create several course objectives following Bloom's Taxonomy, using all 6 levels. Note that this is a textarea field, so please avoid markdown formatting and ensure the result is within 15 lines.
Example:
Input: Data Structures and Algorithms Quick Sort
Course Objectives:
1. Students will remember the steps of the Quick Sort algorithm and its fundamental principles such as "divide and conquer" and recursion.
2. Students will understand how Quick Sort works, including how to select pivot points and perform partitioning operations.
3. Students will be able to apply the Quick Sort algorithm to manually sort a set of numbers and implement it in a programming language.
4. Students will analyze the execution process of Quick Sort, identifying how algorithm performance may vary with different pivot selections.
5. Students will evaluate the performance of Quick Sort, understanding its efficiency differences compared to Bubble, Selection, and Insertion Sort in various scenarios.
6. Students will explore improvements to the Quick Sort algorithm, such as the median-of-three method or randomized pivot selection, to optimize algorithm performance."""
    else:
        user_prompt = f"""I am preparing a lesson for {lesson_name} on {lesson_theme}, targeting {lesson_type} students who have not studied this course before.
Please create several course objectives following Bloom's Taxonomy, using all 6 levels. Note that this is a textarea field, so please avoid markdown formatting and ensure the result is within 8 lines.
I will provide you with some content that I want or tend to include. Please make sure to incorporate these elements, though the teacher may have partially modified or removed some content, which you should judge accordingly: '{existing_goals}'."""
    return get_answer_single(None, system_prompt, user_prompt)
def generate_materials(lesson_theme, grade_level):
    """Ask Gemini for a bullet list of materials needed for the lesson.

    Returns:
        The generated materials list as text.
    """
    return get_answer_single(
        None,
        """You are an expert educator. Generate a comprehensive list of materials needed for the lesson,
considering the grade level and theme. Include both physical materials and digital resources if applicable.""",
        f"List all materials needed to teach {lesson_theme} to {grade_level} students. Format as bullet points.",
    )
def generate_steps(lesson_theme, grade_level, duration):
    """Ask Gemini for timed, numbered lesson steps.

    Args:
        lesson_theme: topic of the lesson.
        grade_level: audience grade level.
        duration: total lesson length in minutes.

    Returns:
        The generated step-by-step plan as text.
    """
    return get_answer_single(
        None,
        """You are an expert educator. Create a detailed, step-by-step lesson plan that includes timing
for each activity. Consider the grade level and ensure the steps are age-appropriate.""",
        f"""Create a {duration}-minute lesson plan for teaching {lesson_theme} to {grade_level} students.
Include specific timing for each step and clear instructions. Format as numbered steps with timing.""",
    )
def generate_assessment(lesson_theme, grade_level):
    """Ask Gemini for formative and summative assessment methods.

    Returns:
        The generated assessment description as text.
    """
    return get_answer_single(
        None,
        """You are an expert educator. Create appropriate assessment methods for the lesson,
considering the grade level and learning objectives. Include both formative and summative assessments.""",
        f"Generate assessment methods for evaluating student understanding of {lesson_theme} at the {grade_level} level. Include both formative and summative assessments.",
    )