| """ | |
| Proactive Learning Assistance Module (Phase 1) | |
| Implements intelligent prompting suggestions, context-aware follow-up questions, and critical knowledge gap identification | |
| """ | |
| import json | |
| from typing import Dict, List, Optional, Tuple | |
| from datetime import datetime | |
| from openai import OpenAI | |
class KnowledgeGapAnalyzer:
    """Analyzes user knowledge gaps, especially critical safety-related gaps."""

    # Safety-critical ADAS features that require high knowledge levels
    SAFETY_CRITICAL_FEATURES = [
        "Function of Active Distance Assist DISTRONIC",
        "Function of Active Stop-and-Go Assist",
        "Function of Active Steering Assist",
    ]

    # Knowledge-level thresholds
    CRITICAL_GAP_THRESHOLD = 0.5  # Below this is a critical gap for safety features
    WEAK_AREA_THRESHOLD = 0.6  # Below this is a weak area

    def __init__(self, available_topics: List[str]):
        self.available_topics = available_topics

    def identify_critical_gaps(self, user_profile) -> List[str]:
        """
        Identify critical knowledge gaps that could impact safety.

        Returns:
            List of topics with critical knowledge gaps
        """
        critical_gaps = []
        knowledge_level = getattr(user_profile, 'knowledge_level', {})
        for topic in self.available_topics:
            level = knowledge_level.get(topic, 0.0)
            # A safety-critical feature with low knowledge counts as a critical gap
            if topic in self.SAFETY_CRITICAL_FEATURES and level < self.CRITICAL_GAP_THRESHOLD:
                critical_gaps.append(topic)
        return critical_gaps

    def identify_weak_areas(self, user_profile) -> List[str]:
        """
        Identify all weak areas (not just the critical ones).

        Returns:
            List of topics with weak knowledge levels
        """
        weak_areas = []
        knowledge_level = getattr(user_profile, 'knowledge_level', {})
        for topic in self.available_topics:
            level = knowledge_level.get(topic, 0.0)
            if level < self.WEAK_AREA_THRESHOLD:
                weak_areas.append(topic)
        return weak_areas

    def get_gap_priority(self, user_profile) -> List[Tuple[str, float]]:
        """
        Get knowledge gaps with priority scores.

        Returns:
            List of (topic, priority_score) tuples, sorted by descending priority
        """
        gaps = []
        knowledge_level = getattr(user_profile, 'knowledge_level', {})
        for topic in self.available_topics:
            level = knowledge_level.get(topic, 0.0)
            priority = 0.0
            # Safety-critical features get higher priority
            if topic in self.SAFETY_CRITICAL_FEATURES:
                priority += 2.0
            # Lower knowledge level means higher priority
            priority += (1.0 - level) * 1.5
            # Topics already flagged as weak areas get a small boost
            if topic in getattr(user_profile, 'weak_areas', []):
                priority += 0.5
            gaps.append((topic, priority))
        # Sort by priority (descending)
        gaps.sort(key=lambda x: x[1], reverse=True)
        return gaps

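# Illustrative sketch only: the real UserProfile class lives elsewhere in this
# project, so this stand-in profile assumes just the `knowledge_level` and
# `weak_areas` attributes that KnowledgeGapAnalyzer reads above.
#
#     from types import SimpleNamespace
#
#     _topic = "Function of Active Steering Assist"
#     _profile = SimpleNamespace(knowledge_level={_topic: 0.3}, weak_areas=[_topic])
#     _analyzer = KnowledgeGapAnalyzer([_topic])
#     _analyzer.identify_critical_gaps(_profile)  # -> [_topic], since 0.3 < 0.5
#     _analyzer.get_gap_priority(_profile)        # -> [(_topic, 3.55)]
#                                                 #    2.0 + (1 - 0.3) * 1.5 + 0.5
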
class PromptSuggestionGenerator:
    """Generates intelligent prompt suggestions based on user profile and learning history."""

    def __init__(self, client: OpenAI, rag_engine, knowledge_gap_analyzer: KnowledgeGapAnalyzer,
                 available_topics: List[str]):
        self.client = client
        self.rag_engine = rag_engine
        self.gap_analyzer = knowledge_gap_analyzer
        self.available_topics = available_topics

    def generate_suggestions(self, user_id: str, user_profile, learning_path=None,
                             context: Optional[str] = None, max_suggestions: int = 5) -> List[Dict[str, str]]:
        """
        Generate prompt suggestions based on multiple criteria.

        Args:
            user_id: User ID
            user_profile: UserProfile object
            learning_path: Optional LearningPath object
            context: Optional context (e.g., the user's most recent question)
            max_suggestions: Maximum number of suggestions to return

        Returns:
            List of suggestion dictionaries with 'question' and 'reason' keys
        """
        suggestions = []

        # 1. Based on critical knowledge gaps
        critical_gaps = self.gap_analyzer.identify_critical_gaps(user_profile)
        for topic in critical_gaps[:2]:  # Top 2 critical gaps
            question = self._generate_question_for_topic(topic, "remember")
            if question:
                suggestions.append({
                    "question": question,
                    "reason": f"Critical Safety Feature: Your understanding of {topic.replace('Function of ', '')} needs improvement",
                    "priority": "high",
                    "type": "critical_gap"
                })

        # 2. Based on the learning path
        if learning_path and getattr(learning_path, 'nodes', None):
            current_node = None
            if learning_path.current_node_index < len(learning_path.nodes):
                current_node = learning_path.nodes[learning_path.current_node_index]
            if current_node and current_node.status != "completed":
                question = self._generate_question_for_topic(current_node.topic, current_node.bloom_level)
                if question:
                    suggestions.append({
                        "question": question,
                        "reason": f"Learning Path: Current learning node - {current_node.topic}",
                        "priority": "medium",
                        "type": "learning_path"
                    })

        # 3. Based on weak areas
        weak_areas = self.gap_analyzer.identify_weak_areas(user_profile)
        for topic in weak_areas[:2]:  # Top 2 weak areas
            if topic not in critical_gaps:  # Avoid duplicates
                question = self._generate_question_for_topic(topic, "understand")
                if question:
                    suggestions.append({
                        "question": question,
                        "reason": f"Weak Area: Recommend strengthening understanding of {topic.replace('Function of ', '')}",
                        "priority": "medium",
                        "type": "weak_area"
                    })

        # 4. Based on recent questions (if context is provided)
        if context:
            related_questions = self._generate_related_questions(context)
            for q in related_questions[:2]:
                suggestions.append({
                    "question": q,
                    "reason": "Related Question: Explore the topic you just asked about in more depth",
                    "priority": "low",
                    "type": "related"
                })

        # 5. Based on topics the user has not studied yet
        knowledge_level = getattr(user_profile, 'knowledge_level', {})
        unlearned_topics = [t for t in self.available_topics if t not in knowledge_level]
        for topic in unlearned_topics[:1]:  # Top 1 unlearned topic
            question = self._generate_question_for_topic(topic, "remember")
            if question:
                suggestions.append({
                    "question": question,
                    "reason": f"New Topic: Start learning {topic.replace('Function of ', '')}",
                    "priority": "low",
                    "type": "new_topic"
                })

        # Rank and trim the suggestions
        suggestions = self._rank_suggestions(suggestions)
        return suggestions[:max_suggestions]

    def _generate_question_for_topic(self, topic: str, level: str = "understand") -> Optional[str]:
        """Generate a question for a specific topic."""
        try:
            # Use RAG to get topic information
            query = f"What are the key points about {topic}?"
            answer, _ = self.rag_engine.query(query)

            # Truncate the retrieved answer to keep the prompt within token limits
            prompt = f"""Based on the following information about {topic}, generate a single, clear question that a user might ask to learn about this topic.
The question should be at the {level} level of Bloom's taxonomy.

Information:
{answer[:500]}

Generate only the question text, nothing else. The question should be:
- Clear and specific
- Appropriate for someone learning about ADAS systems
- In Chinese or English (match the user's language preference)

Question:"""
            response = self.client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[
                    {"role": "system", "content": "You are a helpful assistant that generates educational questions."},
                    {"role": "user", "content": prompt}
                ],
                temperature=0.7,
                max_tokens=100
            )
            question = response.choices[0].message.content.strip()
            # Remove surrounding quotes if present
            question = question.strip('"').strip("'")
            return question
        except Exception as e:
            print(f"Error generating question for topic {topic}: {e}")
            # Fall back to a simple template question
            topic_clean = topic.replace("Function of ", "").replace(" Assist", "")
            return f"What is {topic_clean} and how does it work?"

    def _generate_related_questions(self, context: str) -> List[str]:
        """Generate related questions based on context."""
        try:
            prompt = f"""Based on the following question or context, generate 2-3 related follow-up questions that would help deepen understanding.

Context: {context[:300]}

Generate 2-3 questions, one per line. Questions should:
- Build upon the context
- Help explore related concepts
- Be clear and specific

Questions:"""
            response = self.client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[
                    {"role": "system", "content": "You are a helpful assistant that generates educational follow-up questions."},
                    {"role": "user", "content": prompt}
                ],
                temperature=0.7,
                max_tokens=200
            )
            questions_text = response.choices[0].message.content.strip()
            # Split into lines and strip any leading list markers
            questions = [q.strip().strip('-').strip() for q in questions_text.split('\n') if q.strip()]
            return questions[:3]
        except Exception as e:
            print(f"Error generating related questions: {e}")
            return []

    def _rank_suggestions(self, suggestions: List[Dict]) -> List[Dict]:
        """Rank suggestions by priority."""
        priority_weights = {"high": 3, "medium": 2, "low": 1}
        suggestions.sort(key=lambda x: priority_weights.get(x.get("priority", "low"), 1), reverse=True)
        return suggestions

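# Illustrative shape of one ranked suggestion (the values here are invented):
#
#     {
#         "question": "What does Active Steering Assist do at highway speeds?",
#         "reason": "Critical Safety Feature: Your understanding of Active Steering Assist needs improvement",
#         "priority": "high",      # "high" | "medium" | "low"
#         "type": "critical_gap",  # or "learning_path" | "weak_area" | "related" | "new_topic"
#     }
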
class FollowUpQuestionGenerator:
    """Generates context-aware follow-up questions based on RAG answers."""

    def __init__(self, client: OpenAI, rag_engine):
        self.client = client
        self.rag_engine = rag_engine
        self.bloom_levels = ["remember", "understand", "apply", "analyze", "evaluate", "create"]

    def generate_follow_up_questions(self, answer: str, user_profile,
                                     max_questions: int = 5) -> List[Dict[str, str]]:
        """
        Generate follow-up questions based on the answer provided.

        Args:
            answer: The RAG answer text
            user_profile: UserProfile object
            max_questions: Maximum number of questions to generate

        Returns:
            List of question dictionaries with 'question' and 'bloom_level' keys
        """
        questions = []

        # Determine the user's current Bloom level (defaults to "understand")
        current_bloom = self._infer_user_bloom_level(user_profile)
        current_index = self.bloom_levels.index(current_bloom) if current_bloom in self.bloom_levels else 1

        # Generate questions for two Bloom levels, starting at the current one
        target_levels = self.bloom_levels[current_index:current_index + 3]
        for level in target_levels[:2]:  # Limit to 2 levels
            level_questions = self._generate_questions_by_bloom(answer, level)
            questions.extend(level_questions[:2])  # Up to 2 questions per level

        # Also generate a related-concept question
        related_questions = self._generate_related_concept_questions(answer)
        questions.extend(related_questions[:1])

        return questions[:max_questions]

    def _infer_user_bloom_level(self, user_profile) -> str:
        """Infer the user's current Bloom taxonomy level from their profile."""
        # Check recent test performance: find the highest level with good performance
        if getattr(user_profile, 'bloom_level_performance', None):
            for level in reversed(self.bloom_levels):
                for topic_perf in user_profile.bloom_level_performance.values():
                    if level in topic_perf and topic_perf[level] >= 0.7:
                        return level

        # Otherwise, fall back to a default based on overall progress
        if getattr(user_profile, 'knowledge_level', None):
            avg_level = sum(user_profile.knowledge_level.values()) / len(user_profile.knowledge_level)
            if avg_level < 0.3:
                return "remember"
            elif avg_level < 0.6:
                return "understand"
            else:
                return "apply"

        return "understand"  # Default

    def _generate_questions_by_bloom(self, answer: str, bloom_level: str) -> List[Dict[str, str]]:
        """Generate questions at a specific Bloom taxonomy level."""
        try:
            bloom_descriptions = {
                "remember": "test basic recall of facts and information",
                "understand": "test explanation and interpretation of concepts",
                "apply": "test application of knowledge in practical situations",
                "analyze": "test analysis of relationships and structure",
                "evaluate": "test evaluation and judgment based on criteria",
                "create": "test creation of new ideas or solutions"
            }

            # Truncate the answer to keep the prompt within token limits
            prompt = f"""Based on the following answer about ADAS systems, generate 2 follow-up questions at the {bloom_level} level of Bloom's taxonomy.

Bloom level description: {bloom_descriptions.get(bloom_level, '')}

Answer text:
{answer[:800]}

Generate 2 questions that:
- Build upon the information in the answer
- Are at the {bloom_level} level
- Help deepen understanding
- Are clear and specific

Output format: One question per line, no numbering or bullets.

Questions:"""
            response = self.client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[
                    {"role": "system", "content": "You are an educational assistant that generates follow-up questions."},
                    {"role": "user", "content": prompt}
                ],
                temperature=0.7,
                max_tokens=200
            )
            questions_text = response.choices[0].message.content.strip()
            question_list = [q.strip().strip('-').strip() for q in questions_text.split('\n') if q.strip()]
            return [{"question": q, "bloom_level": bloom_level} for q in question_list[:2]]
        except Exception as e:
            print(f"Error generating questions by Bloom level: {e}")
            return []

    def _generate_related_concept_questions(self, answer: str) -> List[Dict[str, str]]:
        """Generate a question about a related concept."""
        try:
            prompt = f"""Based on the following answer, generate 1 question about a related ADAS concept that would help the user understand the broader context.

Answer:
{answer[:500]}

Generate 1 question about a related concept or feature that connects to the information provided.

Question:"""
            response = self.client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[
                    {"role": "system", "content": "You are an educational assistant."},
                    {"role": "user", "content": prompt}
                ],
                temperature=0.7,
                max_tokens=100
            )
            question = response.choices[0].message.content.strip().strip('"').strip("'")
            return [{"question": question, "bloom_level": "understand"}]
        except Exception as e:
            print(f"Error generating related concept question: {e}")
            return []

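# Illustrative sketch only: with a stand-in profile whose best demonstrated
# Bloom level is "understand", generate_follow_up_questions targets the
# "understand" and "apply" levels (up to two questions each) plus one
# related-concept question, then trims the list to max_questions.
#
#     from types import SimpleNamespace
#
#     _profile = SimpleNamespace(
#         bloom_level_performance={"DISTRONIC": {"understand": 0.8}},
#         knowledge_level={},
#     )
#     FollowUpQuestionGenerator(client, rag_engine)._infer_user_bloom_level(_profile)
#     # -> "understand"
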
class ProactiveLearningEngine:
    """Main engine for proactive learning assistance."""

    def __init__(self, client: OpenAI, rag_engine, user_profiling, adaptive_engine=None,
                 available_topics: Optional[List[str]] = None):
        self.client = client
        self.rag_engine = rag_engine
        self.user_profiling = user_profiling
        self.adaptive_engine = adaptive_engine
        self.available_topics = available_topics or []

        # Initialize components
        self.gap_analyzer = KnowledgeGapAnalyzer(self.available_topics)
        self.suggestion_generator = PromptSuggestionGenerator(
            client, rag_engine, self.gap_analyzer, self.available_topics
        )
        self.followup_generator = FollowUpQuestionGenerator(client, rag_engine)

    def get_prompt_suggestions(self, user_id: str, context: Optional[str] = None,
                               max_suggestions: int = 5) -> List[Dict[str, str]]:
        """
        Get prompt suggestions for a user.

        Args:
            user_id: User ID
            context: Optional context (e.g., the user's most recent question)
            max_suggestions: Maximum number of suggestions

        Returns:
            List of suggestion dictionaries
        """
        if not self.user_profiling:
            return []

        user_profile = self.user_profiling.get_or_create_profile(user_id)

        # Get the learning path if an adaptive engine is available
        learning_path = None
        if self.adaptive_engine:
            learning_path = self.adaptive_engine.get_active_path(user_id)

        return self.suggestion_generator.generate_suggestions(
            user_id, user_profile, learning_path, context, max_suggestions
        )

    def get_follow_up_questions(self, user_id: str, answer: str,
                                max_questions: int = 5) -> List[Dict[str, str]]:
        """
        Get follow-up questions based on an answer.

        Args:
            user_id: User ID
            answer: The RAG answer text
            max_questions: Maximum number of questions

        Returns:
            List of question dictionaries
        """
        if not self.user_profiling:
            return []

        user_profile = self.user_profiling.get_or_create_profile(user_id)
        return self.followup_generator.generate_follow_up_questions(
            answer, user_profile, max_questions
        )

    def get_critical_gaps(self, user_id: str) -> List[str]:
        """
        Get critical knowledge gaps for a user.

        Args:
            user_id: User ID

        Returns:
            List of topics with critical gaps
        """
        if not self.user_profiling:
            return []

        user_profile = self.user_profiling.get_or_create_profile(user_id)
        return self.gap_analyzer.identify_critical_gaps(user_profile)

    def analyze_user_state(self, user_id: str) -> Dict:
        """
        Analyze the user's current learning state.

        Args:
            user_id: User ID

        Returns:
            Dictionary with analysis results
        """
        if not self.user_profiling:
            return {}

        user_profile = self.user_profiling.get_or_create_profile(user_id)
        critical_gaps = self.gap_analyzer.identify_critical_gaps(user_profile)
        weak_areas = self.gap_analyzer.identify_weak_areas(user_profile)
        gap_priorities = self.gap_analyzer.get_gap_priority(user_profile)

        return {
            "critical_gaps": critical_gaps,
            "weak_areas": weak_areas,
            "gap_priorities": gap_priorities[:5],  # Top 5
            "total_gaps": len(weak_areas),
            "critical_gaps_count": len(critical_gaps)
        }
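
# Example wiring (a sketch, not part of this module): the concrete rag_engine,
# user_profiling, and adaptive_engine objects come from the rest of the
# project. The only interfaces assumed here are the calls made above:
# rag_engine.query(q) -> (answer, sources),
# user_profiling.get_or_create_profile(user_id), and
# adaptive_engine.get_active_path(user_id).
#
#     client = OpenAI()  # reads OPENAI_API_KEY from the environment
#     engine = ProactiveLearningEngine(
#         client, rag_engine, user_profiling, adaptive_engine,
#         available_topics=list(KnowledgeGapAnalyzer.SAFETY_CRITICAL_FEATURES),
#     )
#     for s in engine.get_prompt_suggestions("user-123", max_suggestions=3):
#         print(s["priority"], s["type"], s["question"])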