File size: 10,123 Bytes
1221662
 
 
 
 
81f3f6f
 
 
 
 
 
 
 
 
 
 
 
1221662
 
 
 
81f3f6f
1221662
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
05245c5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81f3f6f
05245c5
 
 
 
 
 
81f3f6f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
05245c5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
import google.generativeai as genai
from concurrent.futures import ThreadPoolExecutor
import threading
from collections import deque
import os
from dotenv import load_dotenv
from pathlib import Path

# --- Gemini API configuration (runs at import time) ---
# NOTE(review): loading of a .env file via python-dotenv was commented out
# here; the key must already be exported in the process environment.
# Removed the dead commented-out debug/env-loading code.
GOOGLE_API_KEY = os.getenv('Gemini_API_key')
if not GOOGLE_API_KEY:
    # Fail fast at import time rather than on the first API call.
    raise ValueError("Gemini_API_key environment variable is not set")
genai.configure(api_key=GOOGLE_API_KEY)

# Generation parameters applied to every request issued through `model`.
generation_config = {
    "temperature": 0.8,
    "top_p": 1,
    "top_k": 1,
    "max_output_tokens": 2048,
}

# Block medium-probability-and-above content in each harm category.
_BLOCKED_CATEGORIES = (
    "HARM_CATEGORY_HARASSMENT",
    "HARM_CATEGORY_HATE_SPEECH",
    "HARM_CATEGORY_SEXUALLY_EXPLICIT",
    "HARM_CATEGORY_DANGEROUS_CONTENT",
)
safety_settings = [
    {"category": category, "threshold": "BLOCK_MEDIUM_AND_ABOVE"}
    for category in _BLOCKED_CATEGORIES
]

# Shared Gemini client used by all of the helper functions below.
model = genai.GenerativeModel(
    model_name="gemini-2.5-flash",
    generation_config=generation_config,
    safety_settings=safety_settings,
)

class History:
    """Bounded chat transcript: keeps at most `max_save_round` records.

    Each record is a dict {'role': 'user'|'model', 'content': str}; the
    oldest record is evicted automatically once the bound is reached.
    """

    def __init__(self, max_save_round):
        self.max_save_round = max_save_round
        # deque with maxlen silently drops the oldest entry on overflow.
        self.history = deque(maxlen=self.max_save_round)

    def get_from_list(self, lst):
        """Import records from a list of {'type', 'content'} dicts.

        Items of type 'request' become 'user' turns, everything else
        'model' turns. The final item of `lst` is intentionally skipped
        (presumably the in-flight request — TODO confirm with callers).
        """
        for item in lst[:-1]:
            role = 'user' if item['type'] == 'request' else 'model'
            self.history.append({'role': role, 'content': item['content']})

    def save_history(self, role, content):
        """Append one record with the given role and content."""
        self.history.append({'role': role, 'content': content})

    def clear_history(self):
        """Drop every stored record."""
        self.history.clear()

    def get_histories(self, n=None):
        """Return the most recent `n` records (all records if n is None
        or n exceeds the stored count; an empty list if n <= 0).

        Fix: the previous `list(...)[-n:]` returned the ENTIRE history
        for n == 0 (since lst[-0:] == lst[0:]) and a nonsense slice for
        negative n.
        """
        if n is None or n >= len(self.history):
            return list(self.history)
        if n <= 0:
            return []
        return list(self.history)[-n:]

    def append_to_last_history(self, increment):
        """Concatenate `increment` onto the newest record's content.

        Raises IndexError when the history is empty.
        """
        if not self.history:
            raise IndexError("No history to append to.")
        self.history[-1]['content'] += increment

# Default number of rounds a History should retain; not referenced in this
# chunk — presumably passed to History(...) by callers elsewhere. TODO confirm.
history_rounds = 20

def get_answer(client, history, system_prompt, user_prompt, printing=True, save=True):
    '''
    Stream a Gemini completion for the combined system/user prompt and
    return the full text.

    `client` is never used (kept for signature compatibility); the
    module-level `model` handles the request. `history` is only written
    to (when `save` is true), not sent as context. When `printing` is
    true, chunks are echoed to stdout as they arrive.
    '''
    prompt = f"{system_prompt}\n\n{user_prompt}"
    stream = model.generate_content(prompt, stream=True)

    pieces = []
    for part in stream:
        if part.text:
            pieces.append(part.text)
            if printing:
                print(part.text, end="")
    answer = "".join(pieces)

    if save:
        history.save_history('user', user_prompt)
        history.save_history('model', answer)

    return answer

def get_answer_generator(client, history, system_prompt, user_prompt, printing=True):
    '''
    Yield a Gemini completion chunk-by-chunk while recording it in `history`.

    The user turn and an empty model turn are saved up front; each streamed
    chunk is appended to that model record before being yielded. `client`
    is unused — the module-level `model` performs the request.
    '''
    prompt = f"{system_prompt}\n\n{user_prompt}"

    # Seed the transcript so the model record can grow incrementally.
    history.save_history('user', user_prompt)
    history.save_history('model', '')

    for part in model.generate_content(prompt, stream=True):
        text = part.text
        if not text:
            continue
        history.append_to_last_history(text)
        if printing:
            print(text, end='')
        yield text

def get_answer_single(client, system_prompt, user_prompt, printing=False):
    '''
    Return one complete (non-streaming) Gemini response for the combined
    prompts. `client` is unused; the module-level `model` is called.
    Optionally prints the answer.
    '''
    result = model.generate_content(f"{system_prompt}\n\n{user_prompt}")
    text = result.text
    if printing:
        print(text)
    return text

def get_answers_parallel(client, system_prompt, user_prompt, n=1, printing=False):
    '''
    Fire the same prompt at the model `n` times concurrently and return
    the answers in submission order. A lock serializes optional printing
    so concurrent answers don't interleave on stdout.
    '''
    print_guard = threading.Lock()

    def _worker():
        # Printing is handled here (under the lock), not by the helper.
        result = get_answer_single(client, system_prompt, user_prompt, printing=False)
        if printing:
            with print_guard:
                print(result)
        return result

    with ThreadPoolExecutor(max_workers=n) as pool:
        tasks = [pool.submit(_worker) for _ in range(n)]
        return [task.result() for task in tasks]

def generate_lesson_plan(lesson_name, lesson_theme, grade_level, objectives, materials, duration, steps, assessment):
    """
    Assemble the provided components into one prompt and ask the model for
    a formatted, professional lesson plan. Returns the model's text.
    """
    sys_msg = """You are an expert educator. Create a comprehensive lesson plan using the provided components.
    Format the output in a clear, professional manner with appropriate sections and bullet points."""

    usr_msg = f"""Create a lesson plan with the following details:
    
    Course Name: {lesson_name}
    Theme: {lesson_theme}
    Grade Level: {grade_level}
    
    Learning Objectives:
    {objectives}
    
    Materials Needed:
    {materials}
    
    Duration: {duration} minutes
    
    Lesson Steps:
    {steps}
    
    Assessment:
    {assessment}
    
    Please format this into a professional lesson plan with clear sections and bullet points."""

    return get_answer_single(None, sys_msg, usr_msg)

def generate_objectives(lesson_theme, lesson_name=None, lesson_type=None, existing_goals=None):
    """
    Ask the model for Bloom's-Taxonomy course objectives for the lesson.

    When `existing_goals` is provided (non-empty), the prompt instructs the
    model to incorporate that teacher-supplied content; otherwise a worked
    example is included instead. Returns the model's text.
    """
    sys_msg = """You are an expert educator. Generate clear, measurable learning objectives based on Bloom's Taxonomy
    for the given lesson theme. Include objectives at different levels of Bloom's Taxonomy."""

    if not existing_goals:
        usr_msg = f"""I am preparing a lesson for {lesson_name} on {lesson_theme}, targeting {lesson_type} students who have not studied this course before.
Please create several course objectives following Bloom's Taxonomy, using all 6 levels. Note that this is a textarea field, so please avoid markdown formatting and ensure the result is within 15 lines.

Example:
Input: Data Structures and Algorithms  Quick Sort
Course Objectives:
    1. Students will remember the steps of the Quick Sort algorithm and its fundamental principles such as "divide and conquer" and recursion.
    2. Students will understand how Quick Sort works, including how to select pivot points and perform partitioning operations.
    3. Students will be able to apply the Quick Sort algorithm to manually sort a set of numbers and implement it in a programming language.
    4. Students will analyze the execution process of Quick Sort, identifying how algorithm performance may vary with different pivot selections.
    5. Students will evaluate the performance of Quick Sort, understanding its efficiency differences compared to Bubble, Selection, and Insertion Sort in various scenarios.
    6. Students will explore improvements to the Quick Sort algorithm, such as the median-of-three method or randomized pivot selection, to optimize algorithm performance."""
    else:
        usr_msg = f"""I am preparing a lesson for {lesson_name} on {lesson_theme}, targeting {lesson_type} students who have not studied this course before.
Please create several course objectives following Bloom's Taxonomy, using all 6 levels. Note that this is a textarea field, so please avoid markdown formatting and ensure the result is within 8 lines.

I will provide you with some content that I want or tend to include. Please make sure to incorporate these elements, though the teacher may have partially modified or removed some content, which you should judge accordingly: '{existing_goals}'."""

    return get_answer_single(None, sys_msg, usr_msg)

def generate_materials(lesson_theme, grade_level):
    """
    Ask the model for a bullet-point list of materials appropriate to the
    lesson theme and grade level. Returns the model's text.
    """
    sys_msg = """You are an expert educator. Generate a comprehensive list of materials needed for the lesson,
    considering the grade level and theme. Include both physical materials and digital resources if applicable."""
    usr_msg = f"List all materials needed to teach {lesson_theme} to {grade_level} students. Format as bullet points."
    return get_answer_single(None, sys_msg, usr_msg)

def generate_steps(lesson_theme, grade_level, duration):
    """
    Ask the model for numbered, timed lesson steps that fill `duration`
    minutes for the given theme and grade level. Returns the model's text.
    """
    sys_msg = """You are an expert educator. Create a detailed, step-by-step lesson plan that includes timing
    for each activity. Consider the grade level and ensure the steps are age-appropriate."""
    usr_msg = f"""Create a {duration}-minute lesson plan for teaching {lesson_theme} to {grade_level} students.
    Include specific timing for each step and clear instructions. Format as numbered steps with timing."""
    return get_answer_single(None, sys_msg, usr_msg)

def generate_assessment(lesson_theme, grade_level):
    """
    Ask the model for formative and summative assessment methods suited to
    the lesson theme and grade level. Returns the model's text.
    """
    sys_msg = """You are an expert educator. Create appropriate assessment methods for the lesson,
    considering the grade level and learning objectives. Include both formative and summative assessments."""
    usr_msg = f"Generate assessment methods for evaluating student understanding of {lesson_theme} at the {grade_level} level. Include both formative and summative assessments."
    return get_answer_single(None, sys_msg, usr_msg)