Spaces:
Sleeping
Sleeping
| # lesson_gen.py | |
| import os | |
| import io | |
| import json | |
| import logging | |
| import uuid | |
| import tempfile | |
| import re | |
| from pathlib import Path | |
| import numpy as np | |
| import requests | |
| # LangChain for data sourcing | |
| from langchain_community.document_loaders import ArxivLoader | |
| # Google Gemini | |
| from langchain_google_genai import ChatGoogleGenerativeAI | |
| # Video, Audio, and Animation | |
| from moviepy.editor import * | |
| from PIL import Image, ImageDraw, ImageFont | |
| import matplotlib | |
| matplotlib.use('Agg') # Use non-interactive backend | |
| import matplotlib.pyplot as plt | |
| from matplotlib.animation import FuncAnimation, FFMpegWriter | |
# --- Configuration ---
# Include the originating function name in every log record for easier tracing.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - [%(funcName)s] - %(message)s')
# Output video parameters: frame rate and 720p (1280x720) resolution.
FPS, WIDTH, HEIGHT = 24, 1280, 720
| # --- Helper Functions --- | |
def deepgram_tts(txt: str, voice_model: str = 'aura-2-andromeda-en'):
    """Convert text to speech via the Deepgram `/v1/speak` REST endpoint.

    Returns the raw audio bytes on success, or None when the input text is
    empty, the DEEPGRAM_API_KEY environment variable is unset, or the HTTP
    call fails (the failure is logged, never raised).
    """
    api_key = os.getenv("DEEPGRAM_API_KEY")
    # Guard clause: nothing to synthesize, or no credentials to do it with.
    if not api_key or not txt:
        return None
    try:
        response = requests.post(
            "https://api.deepgram.com/v1/speak",
            params={"model": voice_model},
            headers={
                "Authorization": f"Token {api_key}",
                "Content-Type": "application/json",
            },
            json={"text": txt},
            timeout=45,
        )
        response.raise_for_status()
        return response.content
    except Exception as exc:
        logging.error(f"Deepgram TTS failed: {exc}")
        return None
| # --- AI & Content Generation --- | |
def get_llm():
    """Initializes and returns the Gemini 2.5 Flash LLM."""
    api_key = os.getenv("GOOGLE_API_KEY")
    return ChatGoogleGenerativeAI(
        model="gemini-2.5-flash",
        google_api_key=api_key,
        temperature=0.5,
    )
def fetch_arxiv_papers(topic: str, count=3):
    """Fetch recent paper abstracts from arXiv related to a topic.

    Any failure is logged and swallowed, so callers always receive a list
    (possibly empty) of LangChain documents.
    """
    logging.info(f"Fetching {count} arXiv papers for topic: '{topic}'")
    documents = []
    try:
        arxiv = ArxivLoader(query=topic, load_max_docs=count, load_all_available_meta=True)
        documents = arxiv.load()
        logging.info(f"Successfully fetched {len(documents)} documents from arXiv.")
    except Exception as exc:
        logging.error(f"Failed to fetch from arXiv: {exc}")
        documents = []
    return documents
def generate_knowledge_base(topic: str, level: str, goal: str, arxiv_docs: list):
    """Synthesizes arXiv papers into a structured Knowledge Base for the course.

    Args:
        topic: Main course topic.
        level: Learner's skill level (interpolated into the prompt as-is).
        goal: Learner's stated learning goal (interpolated into the prompt as-is).
        arxiv_docs: LangChain documents, e.g. from fetch_arxiv_papers.

    Returns:
        The Knowledge Base dict parsed from the model's JSON reply
        (keys: "topic", "introduction", "learning_path", "detailed_concepts").

    Raises:
        Exception: re-raised after logging when the LLM call fails or the
            reply cannot be parsed as JSON.
    """
    logging.info(f"Generating Knowledge Base for topic: {topic}")
    llm = get_llm()
    papers_context = "\n\n".join([f"Title: {doc.metadata.get('Title', 'N/A')}\nAbstract: {doc.page_content}" for doc in arxiv_docs])
    prompt = f"""
    You are an expert curriculum designer specializing in AI. Your task is to create a structured Knowledge Base for a personalized course on the topic: "{topic}".
    The learner's details are:
    - Skill Level: {level}
    - Learning Goal: {goal}
    Synthesize the following cutting-edge research from arXiv to create the course foundation:
    ---
    {papers_context}
    ---
    Based on the user's goal and level, and the provided research, generate a JSON object with the following structure:
    1. "topic": The main topic.
    2. "introduction": A brief, engaging introduction tailored to the learner's level.
    3. "learning_path": An array of 5-7 key concepts that form the course outline. Each concept should be a string. Example: ["Introduction to Transformers", "The Attention Mechanism", "BERT and its Variants"].
    4. "detailed_concepts": A dictionary where each key is a concept from the "learning_path" and the value is a detailed explanation (2-3 paragraphs) suitable for the learner's level.
    Return ONLY the valid JSON object, with no markdown formatting.
    """
    try:
        response = llm.invoke(prompt).content.strip()
        # Strip an optional Markdown code fence (``` or ```json). The previous
        # fixed slice response[7:-3] corrupted replies that opened with a bare
        # ``` fence or omitted the closing fence.
        response = re.sub(r'^```(?:json)?\s*', '', response)
        response = re.sub(r'\s*```$', '', response)
        knowledge_base = json.loads(response)
        logging.info("Successfully generated Knowledge Base.")
        return knowledge_base
    except Exception as e:
        logging.error(f"Failed to generate Knowledge Base: {e}")
        raise
def generate_lesson_from_knowledge_base(knowledge_base: dict, concept_to_cover: str):
    """Generates a script and quiz for a lesson, strategically inserting animation tags.

    Args:
        knowledge_base: Dict produced by generate_knowledge_base; its
            "detailed_concepts" entry for `concept_to_cover` seeds the prompt.
        concept_to_cover: Concept title from the knowledge base's learning path.

    Returns:
        Dict parsed from the model's JSON reply with "script" and "quiz" keys.

    Raises:
        Exception: re-raised after logging when the LLM call fails or the
            reply cannot be parsed as JSON.
    """
    logging.info(f"Generating lesson for concept: '{concept_to_cover}'")
    llm = get_llm()
    concept_details = knowledge_base.get("detailed_concepts", {}).get(concept_to_cover, "")
    # Concepts with a matching Matplotlib animation (see generate_matplotlib_animation).
    available_animations = ["Linear Regression"]
    animation_instruction = ""
    if concept_to_cover in available_animations:
        # Tag form matches the router's concept_tag, e.g. "linear_regression".
        animation_tag = concept_to_cover.lower().replace(" ", "_")
        animation_instruction = f'When explaining the core mechanism of {concept_to_cover}, you MUST insert the tag `<animate_matplotlib: "{animation_tag}">` in the script. This is crucial for visualization.'
    prompt = f"""
    You are ProfAI, an engaging AI professor creating a lesson on "{concept_to_cover}".
    Detailed information:
    ---
    {concept_details}
    ---
    {animation_instruction}
    Generate a JSON object with "script" (a 60-90 second video script) and "quiz" (3 multiple-choice questions).
    The script should be conversational and easy to understand.
    Return ONLY the valid JSON object.
    """
    try:
        response = llm.invoke(prompt).content.strip()
        # Strip an optional Markdown code fence (``` or ```json). The previous
        # fixed slice response[7:-3] corrupted replies that opened with a bare
        # ``` fence or omitted the closing fence.
        response = re.sub(r'^```(?:json)?\s*', '', response)
        response = re.sub(r'\s*```$', '', response)
        return json.loads(response)
    except Exception as e:
        logging.error(f"Failed to generate lesson content: {e}")
        raise
def generate_remedial_lesson(failed_concept: str):
    """Generates a short, focused remedial lesson.

    Args:
        failed_concept: The concept the student failed a quiz on.

    Returns:
        Dict parsed from the model's JSON reply with "script" (30-45 second
        micro-lesson) and "quiz" (single-question list) keys.

    Raises:
        Exception: re-raised after logging when the LLM call fails or the
            reply cannot be parsed as JSON.
    """
    logging.info(f"Generating remedial lesson for concept: '{failed_concept}'")
    llm = get_llm()
    prompt = f"""
    You are ProfAI. A student struggled to understand the concept of "{failed_concept}".
    Your task is to create a short, remedial micro-lesson to help them.
    Generate a JSON object with two keys:
    1. "script": A very simple, concise script (30-45 seconds) explaining "{failed_concept}" with a different analogy or a simpler approach.
    2. "quiz": An array with ONE multiple-choice question to confirm their understanding.
    Return ONLY the valid JSON object.
    """
    try:
        response = llm.invoke(prompt).content.strip()
        # Strip an optional Markdown code fence (``` or ```json). The previous
        # fixed slice response[7:-3] corrupted replies that opened with a bare
        # ``` fence or omitted the closing fence.
        response = re.sub(r'^```(?:json)?\s*', '', response)
        response = re.sub(r'\s*```$', '', response)
        return json.loads(response)
    except Exception as e:
        logging.error(f"Failed to generate remedial lesson: {e}")
        raise
| # --- Animation & Video Generation --- | |
def animate_linear_regression(duration: float, temp_dir: Path):
    """Generates a Matplotlib animation of Linear Regression.

    Renders a fixed synthetic scatter (y = 4 + 3x + noise) and animates a red
    line whose slope ramps from 0 toward 3 over the clip, then saves the
    result as an .mp4 via ffmpeg.

    Args:
        duration: Target animation length in seconds.
        temp_dir: Writable directory (pathlib.Path) for the output .mp4.

    Returns:
        A moviepy VideoFileClip wrapping the rendered animation file.
    """
    logging.info("Generating Matplotlib animation for Linear Regression.")
    # figsize is in inches; with Matplotlib's default 100 dpi this yields a
    # WIDTH x HEIGHT pixel canvas — presumably intended to match the video
    # frame size (TODO confirm dpi assumption holds in deployment).
    fig, ax = plt.subplots(figsize=(WIDTH/100, HEIGHT/100))
    np.random.seed(42)  # fixed seed -> identical scatter on every run
    X = 2 * np.random.rand(100, 1)
    y = 4 + 3 * X + np.random.randn(100, 1)  # noisy samples of y = 4 + 3x
    ax.scatter(X, y, alpha=0.6, label='Data Points')
    # Empty line artist; the animation callbacks update its data in place.
    line, = ax.plot([], [], 'r-', lw=3, label='Regression Line')
    ax.set_xlim(0, 2)
    ax.set_ylim(0, 15)
    ax.set_title("Linear Regression: Finding the Best Fit Line", fontsize=20)
    ax.set_xlabel("Feature (X)", fontsize=14)
    ax.set_ylabel("Target (y)", fontsize=14)
    ax.grid(True, linestyle='--', alpha=0.6)
    ax.legend()
    plt.tight_layout()
    def init():
        # Blank frame required by FuncAnimation when blit=True.
        line.set_data([], [])
        return line,
    def update(frame):
        # Animate the line converging to the best fit
        # This is a simplified animation for demonstration
        # progress runs from 0 toward (but never exactly) 1 since frame
        # stops at int(duration * FPS) - 1, so the final slope is just shy of 3.
        progress = frame / (duration * FPS)
        slope = 3 * progress
        intercept = 4
        x_vals = np.array([0, 2])
        y_vals = intercept + slope * x_vals
        line.set_data(x_vals, y_vals)
        return line,
    anim = FuncAnimation(fig, update, frames=int(duration * FPS), init_func=init, blit=True)
    # Unique filename so repeated calls within one temp_dir never collide.
    output_path = temp_dir / f"anim_{uuid.uuid4().hex}.mp4"
    anim.save(str(output_path), writer=FFMpegWriter(fps=FPS))
    plt.close(fig)  # release the figure; Agg backend keeps it alive otherwise
    logging.info(f"Matplotlib animation saved to {output_path}")
    return VideoFileClip(str(output_path))
def generate_matplotlib_animation(concept_tag: str, duration: float, temp_dir: Path):
    """Router to generate the correct Matplotlib animation based on a tag.

    Returns the generated clip for a recognized tag, or None otherwise.
    """
    known_tags = {"linear_regression"}
    if concept_tag not in known_tags:
        return None
    return animate_linear_regression(duration, temp_dir)
def create_lesson_video(script: str, narration_audio_bytes: bytes):
    """Creates a complete lesson video, incorporating Matplotlib animations if tagged.

    Splits the script on `<animate_matplotlib: "tag">` markers, shows each
    text segment as a caption for a duration proportional to its character
    count, splices in a fixed-length animation clip after each tag, and
    composites everything over a dark background with the narration audio.

    Args:
        script: Narration script, optionally containing animation tags.
        narration_audio_bytes: Raw audio bytes (written out as an .mp3).

    Returns:
        The rendered MP4 file contents as bytes.
    """
    logging.info("Starting comprehensive video generation.")
    with tempfile.TemporaryDirectory() as temp_dir_str:
        temp_dir = Path(temp_dir_str)
        audio_path = temp_dir / "narration.mp3"
        audio_path.write_bytes(narration_audio_bytes)
        audio_clip = AudioFileClip(str(audio_path))
        total_duration = audio_clip.duration
        # re.split with two capture groups yields
        # [text, full_tag, tag_name, text, full_tag, tag_name, ...]:
        # text chunks sit at indices 0, 3, 6, ...; tag names at 2, 5, 8, ...
        tag_pattern = r'(<animate_matplotlib: "([^"]+)">)'
        script_parts = re.split(tag_pattern, script)
        text_segments = [s for s in script_parts[::3] if s.strip()]
        # NOTE(review): dropping empty text chunks here can desynchronize
        # text_segments[i] from tags[i] when the script begins with a tag —
        # confirm generated scripts always start with narration text.
        tags = script_parts[2::3]
        final_clips = []
        running_time = 0
        # This allocation is simplified; a more robust method might time the audio parts.
        total_text_chars = sum(len(s) for s in text_segments)
        time_per_char = total_duration / total_text_chars if total_text_chars > 0 else 0
        # Create clips for each segment
        for i, text_part in enumerate(text_segments):
            # Screen time is proportional to the segment's share of the text.
            part_duration = len(text_part) * time_per_char
            txt_clip = TextClip(text_part.strip(), fontsize=40, color='white', font='Arial-Bold', size=(WIDTH*0.8, None), method='caption').set_duration(part_duration)
            final_clips.append(txt_clip.set_start(running_time).set_position('center'))
            running_time += part_duration
            # After each text segment, insert the animation for the following tag.
            if i < len(tags):
                anim_duration = 7  # Fixed duration for matplotlib animations
                anim_clip = generate_matplotlib_animation(tags[i], anim_duration, temp_dir)
                if anim_clip:
                    final_clips.append(anim_clip.set_duration(anim_duration).set_start(running_time).set_position('center'))
                    running_time += anim_duration
        # Total timeline length; animations extend it past the narration length,
        # and set_duration below stretches/truncates the audio track to match.
        final_duration = running_time
        bg_clip = ColorClip(size=(WIDTH, HEIGHT), color=(20, 20, 40)).set_duration(final_duration)
        final_video = CompositeVideoClip([bg_clip] + final_clips)
        final_video = final_video.set_audio(audio_clip.set_duration(final_duration))
        # Render inside the TemporaryDirectory and return the bytes before it
        # is cleaned up on context exit.
        output_path = temp_dir / "final_video.mp4"
        final_video.write_videofile(str(output_path), codec='libx264', fps=FPS, threads=4, logger='bar')
        return Path(output_path).read_bytes()