rairo committed on
Commit
d8086ad
·
verified ·
1 Parent(s): e422cf9

Create lesson_gen.py

Browse files
Files changed (1) hide show
  1. lesson_gen.py +253 -0
lesson_gen.py ADDED
@@ -0,0 +1,253 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # lesson_gen.py
2
+ import os
3
+ import io
4
+ import json
5
+ import logging
6
+ import uuid
7
+ import tempfile
8
+ import re
9
+ from pathlib import Path
10
+ import numpy as np
11
+ import requests
12
+
13
+ # LangChain for data sourcing
14
+ from langchain_community.document_loaders import ArxivLoader
15
+
16
+ # Google Gemini
17
+ from langchain_google_genai import ChatGoogleGenerativeAI
18
+
19
+ # Video, Audio, and Animation
20
+ from moviepy.editor import *
21
+ from PIL import Image, ImageDraw, ImageFont
22
+ import matplotlib
23
+ matplotlib.use('Agg') # Use non-interactive backend
24
+ import matplotlib.pyplot as plt
25
+ from matplotlib.animation import FuncAnimation, FFMpegWriter
26
+
27
# --- Configuration ---
# Verbose log format includes the originating function name for easier tracing.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - [%(funcName)s] - %(message)s')
# Render settings shared by every clip: 24 fps at 1280x720 (720p).
FPS, WIDTH, HEIGHT = 24, 1280, 720
30
+
31
+ # --- Helper Functions ---
32
def deepgram_tts(txt: str, voice_model: str = 'aura-2-andromeda-en'):
    """Call the Deepgram API to convert text to speech.

    Args:
        txt: Text to synthesize. Empty/None text short-circuits to None.
        voice_model: Deepgram voice model identifier.

    Returns:
        Raw audio bytes on success, or None when the API key is unset,
        the text is empty, or the HTTP request fails.
    """
    DG_KEY = os.getenv("DEEPGRAM_API_KEY")
    if not DG_KEY or not txt:
        return None
    try:
        r = requests.post(
            "https://api.deepgram.com/v1/speak",
            params={"model": voice_model},
            headers={"Authorization": f"Token {DG_KEY}", "Content-Type": "application/json"},
            json={"text": txt},
            timeout=45,
        )
        r.raise_for_status()
        return r.content
    except requests.RequestException as e:
        # Narrowed from bare Exception: only network/HTTP failures are
        # expected best-effort cases here; programming errors should surface.
        logging.error(f"Deepgram TTS failed: {e}")
        return None
49
+
50
+ # --- AI & Content Generation ---
51
def get_llm(model: str = "gemini-2.5-flash", temperature: float = 0.5):
    """Initialize and return a Gemini chat LLM.

    Generalized from a hard-coded configuration: callers may now select a
    different Gemini model or temperature; the defaults preserve the
    original behavior, so existing zero-argument callers are unaffected.

    Args:
        model: Gemini model identifier.
        temperature: Sampling temperature.

    Returns:
        A configured ChatGoogleGenerativeAI instance authenticated via the
        GOOGLE_API_KEY environment variable.
    """
    return ChatGoogleGenerativeAI(
        model=model,
        google_api_key=os.getenv("GOOGLE_API_KEY"),
        temperature=temperature,
    )
54
+
55
def fetch_arxiv_papers(topic: str, count=3):
    """Fetch recent paper abstracts from arXiv related to *topic*.

    Args:
        topic: Free-text arXiv query.
        count: Maximum number of documents to load.

    Returns:
        A list of LangChain documents; an empty list on any failure.
    """
    logging.info(f"Fetching {count} arXiv papers for topic: '{topic}'")
    try:
        documents = ArxivLoader(
            query=topic,
            load_max_docs=count,
            load_all_available_meta=True,
        ).load()
    except Exception as e:
        # Best-effort: a sourcing failure degrades to an empty corpus.
        logging.error(f"Failed to fetch from arXiv: {e}")
        return []
    logging.info(f"Successfully fetched {len(documents)} documents from arXiv.")
    return documents
66
+
67
def generate_knowledge_base(topic: str, level: str, goal: str, arxiv_docs: list):
    """Synthesize arXiv papers into a structured Knowledge Base for the course.

    Args:
        topic: Course topic.
        level: Learner's skill level.
        goal: Learner's stated learning goal.
        arxiv_docs: LangChain documents from fetch_arxiv_papers.

    Returns:
        Dict with "topic", "introduction", "learning_path" and
        "detailed_concepts" keys, as produced by the LLM.

    Raises:
        Re-raises any LLM or JSON-decoding failure after logging it.
    """
    logging.info(f"Generating Knowledge Base for topic: {topic}")
    llm = get_llm()

    papers_context = "\n\n".join(
        f"Title: {doc.metadata.get('Title', 'N/A')}\nAbstract: {doc.page_content}"
        for doc in arxiv_docs
    )

    prompt = f"""
    You are an expert curriculum designer specializing in AI. Your task is to create a structured Knowledge Base for a personalized course on the topic: "{topic}".

    The learner's details are:
    - Skill Level: {level}
    - Learning Goal: {goal}

    Synthesize the following cutting-edge research from arXiv to create the course foundation:
    ---
    {papers_context}
    ---

    Based on the user's goal and level, and the provided research, generate a JSON object with the following structure:
    1. "topic": The main topic.
    2. "introduction": A brief, engaging introduction tailored to the learner's level.
    3. "learning_path": An array of 5-7 key concepts that form the course outline. Each concept should be a string. Example: ["Introduction to Transformers", "The Attention Mechanism", "BERT and its Variants"].
    4. "detailed_concepts": A dictionary where each key is a concept from the "learning_path" and the value is a detailed explanation (2-3 paragraphs) suitable for the learner's level.

    Return ONLY the valid JSON object, with no markdown formatting.
    """
    try:
        response = llm.invoke(prompt).content.strip()
        # Robust Markdown-fence stripping: the previous response[7:-3] slice
        # corrupted the payload whenever the closing ``` was missing or the
        # opening fence was bare ``` instead of ```json.
        if response.startswith("```"):
            response = re.sub(r'^```(?:json)?\s*', '', response)
            response = re.sub(r'\s*```$', '', response)
        knowledge_base = json.loads(response)
        logging.info("Successfully generated Knowledge Base.")
        return knowledge_base
    except Exception as e:
        logging.error(f"Failed to generate Knowledge Base: {e}")
        raise
103
+
104
def generate_lesson_from_knowledge_base(knowledge_base: dict, concept_to_cover: str):
    """Generate a script and quiz for a lesson, strategically inserting animation tags.

    Args:
        knowledge_base: Dict produced by the knowledge-base step; its
            "detailed_concepts" mapping supplies the lesson context.
        concept_to_cover: The learning-path concept to teach.

    Returns:
        Dict with "script" (narration text, possibly containing
        `<animate_matplotlib: "...">` tags) and "quiz" keys.

    Raises:
        Re-raises any LLM or JSON-decoding failure after logging it.
    """
    logging.info(f"Generating lesson for concept: '{concept_to_cover}'")
    llm = get_llm()
    concept_details = knowledge_base.get("detailed_concepts", {}).get(concept_to_cover, "")

    # Only concepts with a matching animation routine get the tag instruction.
    available_animations = ["Linear Regression"]
    animation_instruction = ""
    if concept_to_cover in available_animations:
        animation_tag = concept_to_cover.lower().replace(" ", "_")
        animation_instruction = f'When explaining the core mechanism of {concept_to_cover}, you MUST insert the tag `<animate_matplotlib: "{animation_tag}">` in the script. This is crucial for visualization.'

    prompt = f"""
    You are ProfAI, an engaging AI professor creating a lesson on "{concept_to_cover}".
    Detailed information:
    ---
    {concept_details}
    ---
    {animation_instruction}

    Generate a JSON object with "script" (a 60-90 second video script) and "quiz" (3 multiple-choice questions).
    The script should be conversational and easy to understand.
    Return ONLY the valid JSON object.
    """
    try:
        response = llm.invoke(prompt).content.strip()
        # Robust Markdown-fence stripping: the previous response[7:-3] slice
        # corrupted the payload whenever the closing ``` was missing or the
        # opening fence was bare ``` instead of ```json.
        if response.startswith("```"):
            response = re.sub(r'^```(?:json)?\s*', '', response)
            response = re.sub(r'\s*```$', '', response)
        return json.loads(response)
    except Exception as e:
        logging.error(f"Failed to generate lesson content: {e}")
        raise
135
+
136
def generate_remedial_lesson(failed_concept: str):
    """Generate a short, focused remedial lesson.

    Args:
        failed_concept: The concept the student failed to grasp.

    Returns:
        Dict with a "script" (30-45 second micro-lesson) and a one-question
        "quiz" array.

    Raises:
        Re-raises any LLM or JSON-decoding failure after logging it.
    """
    logging.info(f"Generating remedial lesson for concept: '{failed_concept}'")
    llm = get_llm()
    prompt = f"""
    You are ProfAI. A student struggled to understand the concept of "{failed_concept}".
    Your task is to create a short, remedial micro-lesson to help them.

    Generate a JSON object with two keys:
    1. "script": A very simple, concise script (30-45 seconds) explaining "{failed_concept}" with a different analogy or a simpler approach.
    2. "quiz": An array with ONE multiple-choice question to confirm their understanding.

    Return ONLY the valid JSON object.
    """
    try:
        response = llm.invoke(prompt).content.strip()
        # Robust Markdown-fence stripping: the previous response[7:-3] slice
        # corrupted the payload whenever the closing ``` was missing or the
        # opening fence was bare ``` instead of ```json.
        if response.startswith("```"):
            response = re.sub(r'^```(?:json)?\s*', '', response)
            response = re.sub(r'\s*```$', '', response)
        return json.loads(response)
    except Exception as e:
        logging.error(f"Failed to generate remedial lesson: {e}")
        raise
157
+
158
+ # --- Animation & Video Generation ---
159
def animate_linear_regression(duration, temp_dir):
    """Generate a Matplotlib animation of Linear Regression.

    Args:
        duration: Target animation length in seconds.
        temp_dir: Path to a writable directory for the rendered mp4.

    Returns:
        A moviepy VideoFileClip of the rendered animation.
    """
    logging.info("Generating Matplotlib animation for Linear Regression.")
    fig, ax = plt.subplots(figsize=(WIDTH / 100, HEIGHT / 100))
    # Fixed seed keeps the synthetic dataset identical across renders.
    np.random.seed(42)
    X = 2 * np.random.rand(100, 1)
    y = 4 + 3 * X + np.random.randn(100, 1)
    ax.scatter(X, y, alpha=0.6, label='Data Points')

    line, = ax.plot([], [], 'r-', lw=3, label='Regression Line')
    ax.set_xlim(0, 2)
    ax.set_ylim(0, 15)
    ax.set_title("Linear Regression: Finding the Best Fit Line", fontsize=20)
    ax.set_xlabel("Feature (X)", fontsize=14)
    ax.set_ylabel("Target (y)", fontsize=14)
    ax.grid(True, linestyle='--', alpha=0.6)
    ax.legend()
    plt.tight_layout()

    total_frames = max(int(duration * FPS), 1)

    def init():
        line.set_data([], [])
        return line,

    def update(frame):
        # Sweep the slope from 0 toward the generating value (3).
        # Dividing by (total_frames - 1) makes the final frame reach the
        # true slope exactly; the old frame / (duration * FPS) stopped one
        # step short because frames run 0..total_frames-1.
        progress = frame / max(total_frames - 1, 1)
        slope = 3 * progress
        intercept = 4
        x_vals = np.array([0, 2])
        y_vals = intercept + slope * x_vals
        line.set_data(x_vals, y_vals)
        return line,

    anim = FuncAnimation(fig, update, frames=total_frames, init_func=init, blit=True)
    # Unique filename avoids collisions when several animations share temp_dir.
    output_path = temp_dir / f"anim_{uuid.uuid4().hex}.mp4"
    anim.save(str(output_path), writer=FFMpegWriter(fps=FPS))
    plt.close(fig)
    logging.info(f"Matplotlib animation saved to {output_path}")
    return VideoFileClip(str(output_path))
199
+
200
def generate_matplotlib_animation(concept_tag: str, duration: float, temp_dir: Path):
    """Route a concept tag to the matching Matplotlib animation builder.

    Args:
        concept_tag: Tag extracted from an `<animate_matplotlib: "...">` marker.
        duration: Requested animation length in seconds.
        temp_dir: Writable directory for intermediate render files.

    Returns:
        A moviepy clip for recognized tags; None for unknown tags.
    """
    # Guard clause: unknown tags short-circuit without touching any builder.
    if concept_tag != "linear_regression":
        return None
    return animate_linear_regression(duration, temp_dir)
205
+
206
def create_lesson_video(script: str, narration_audio_bytes: bytes):
    """Creates a complete lesson video, incorporating Matplotlib animations if tagged.

    Args:
        script: Lesson narration text, optionally containing
            `<animate_matplotlib: "tag">` markers that splice an inline
            Matplotlib animation clip into the timeline.
        narration_audio_bytes: MP3 bytes of the narration; its duration
            drives the on-screen caption timing.

    Returns:
        The rendered MP4 video as raw bytes.
    """
    logging.info("Starting comprehensive video generation.")

    # All intermediate artifacts (audio, animation mp4s, final render) live
    # in a temp dir removed on exit; only the returned bytes survive.
    with tempfile.TemporaryDirectory() as temp_dir_str:
        temp_dir = Path(temp_dir_str)
        audio_path = temp_dir / "narration.mp3"
        audio_path.write_bytes(narration_audio_bytes)
        audio_clip = AudioFileClip(str(audio_path))
        total_duration = audio_clip.duration

        # re.split with two capture groups yields
        # [text, full_tag, tag_name, text, ...]: texts at ::3, tag names at 2::3.
        tag_pattern = r'(<animate_matplotlib: "([^"]+)">)'
        script_parts = re.split(tag_pattern, script)

        # NOTE(review): dropping empty text segments here can shift the
        # pairing between text_segments[i] and tags[i] if a script starts
        # with a tag or has two adjacent tags — confirm generated scripts
        # never do that.
        text_segments = [s for s in script_parts[::3] if s.strip()]
        tags = script_parts[2::3]

        final_clips = []
        running_time = 0

        # This allocation is simplified; a more robust method might time the audio parts.
        total_text_chars = sum(len(s) for s in text_segments)
        time_per_char = total_duration / total_text_chars if total_text_chars > 0 else 0

        # Create clips for each segment
        for i, text_part in enumerate(text_segments):
            # Screen time is proportional to the segment's share of the text.
            part_duration = len(text_part) * time_per_char
            txt_clip = TextClip(text_part.strip(), fontsize=40, color='white', font='Arial-Bold', size=(WIDTH*0.8, None), method='caption').set_duration(part_duration)
            final_clips.append(txt_clip.set_start(running_time).set_position('center'))
            running_time += part_duration

            # A tag following this text segment inserts an animation clip.
            if i < len(tags):
                anim_duration = 7 # Fixed duration for matplotlib animations
                anim_clip = generate_matplotlib_animation(tags[i], anim_duration, temp_dir)
                if anim_clip:
                    final_clips.append(anim_clip.set_duration(anim_duration).set_start(running_time).set_position('center'))
                    running_time += anim_duration

        # Animations extend the timeline beyond the narration, so the
        # composite runs for running_time rather than total_duration.
        final_duration = running_time
        bg_clip = ColorClip(size=(WIDTH, HEIGHT), color=(20, 20, 40)).set_duration(final_duration)

        final_video = CompositeVideoClip([bg_clip] + final_clips)
        final_video = final_video.set_audio(audio_clip.set_duration(final_duration))

        output_path = temp_dir / "final_video.mp4"
        final_video.write_videofile(str(output_path), codec='libx264', fps=FPS, threads=4, logger='bar')

        # Read into memory before the TemporaryDirectory context deletes the file.
        return Path(output_path).read_bytes()