cryogenic22 commited on
Commit
113661b
·
verified ·
1 Parent(s): 9ab70df

Create learning_platform.py

Browse files
Files changed (1) hide show
  1. learning_platform.py +212 -0
learning_platform.py ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # learning_platform.py
2
+ from typing import List, Dict, Any, Optional
3
+ from dataclasses import dataclass
4
+ from datetime import datetime
5
+ import json
6
+ import logging
7
+ from langchain.chat_models import ChatOpenAI
8
+ from langchain.embeddings import OpenAIEmbeddings
9
+ from langchain.vectorstores import FAISS
10
+ from langchain.memory import ConversationBufferMemory
11
+ from langchain.chains import ConversationalRetrievalQA
12
+ from prompts import CoursePromptTemplates
13
+ from models import *
14
+ from sqlalchemy.orm import Session
15
+ import tiktoken
16
+
17
# Logging setup: DEBUG-level records mirrored to a log file and the console.
_LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
_LOG_HANDLERS = [
    logging.FileHandler('learning_platform.log'),
    logging.StreamHandler(),
]
logging.basicConfig(level=logging.DEBUG, format=_LOG_FORMAT, handlers=_LOG_HANDLERS)

# Module-level logger, named after this module per stdlib convention.
logger = logging.getLogger(__name__)
28
+
29
class EnhancedCourseBuilder:
    """Generates, persists, and serves AI-authored course content.

    Wraps a chat LLM for generation, a FAISS vector store for retrieval,
    and a SQLAlchemy session for persistence. All generation methods are
    async and expect the ``CoursePromptTemplates`` templates to render
    prompts whose LLM replies are JSON documents.
    """

    def __init__(self, api_key: str, db_session: Session):
        """
        Args:
            api_key: OpenAI API key, used for both chat and embedding calls.
            db_session: Active SQLAlchemy session. This object commits and
                rolls back on it, but never closes it.
        """
        self.api_key = api_key
        self.db_session = db_session
        self.prompt_templates = CoursePromptTemplates()

        # Chat model used for all content generation.
        self.llm = ChatOpenAI(
            temperature=0.7,
            model="gpt-4-turbo-preview",  # latest model with higher token limit
            max_tokens=4096,
            openai_api_key=api_key,
        )

        # Embeddings + vector store. FAISS.from_texts cannot build an empty
        # index, so it is seeded with a single placeholder document.
        self.embeddings = OpenAIEmbeddings(openai_api_key=api_key)
        self.vector_store = FAISS.from_texts(
            ["Initial course content"],
            embedding=self.embeddings,
        )

        # Conversation memory shared with the retrieval QA chain.
        self.memory = ConversationBufferMemory(
            memory_key="chat_history",
            return_messages=True,
        )

        # NOTE(review): `ConversationalRetrievalQA` is not a known langchain
        # export; the standard class is `ConversationalRetrievalChain`.
        # Confirm against the pinned langchain version and the import at the
        # top of this file.
        self.qa_chain = ConversationalRetrievalQA.from_llm(
            llm=self.llm,
            retriever=self.vector_store.as_retriever(),
            memory=self.memory,
            verbose=True,
        )

    @staticmethod
    def _first_generation_text(response) -> str:
        """Return the text of the first generation in an ``LLMResult``.

        ``agenerate`` returns an ``LLMResult`` whose ``generations`` field is
        a list (one entry per prompt) of lists of ``Generation`` objects, so
        two subscripts are required. The original single-subscript access
        (``generations[0].text``) raised ``AttributeError``.
        """
        return response.generations[0][0].text

    async def create_course(self, topic: str, difficulty: str, user_id: int) -> Course:
        """Create a new course, enroll the user, and index the plan.

        Args:
            topic: Course subject; also used as the course title.
            difficulty: Difficulty label; reused as the audience level.
            user_id: User to enroll in the newly created course.

        Returns:
            The persisted ``Course`` row.

        Raises:
            json.JSONDecodeError: If the LLM reply is not valid JSON.
            Exception: Any LLM or database error, after rolling the
                session back.
        """
        logger.info(f"Creating course for topic: {topic}, difficulty: {difficulty}")

        try:
            # Render the planning prompt with fixed defaults for the
            # non-topic parameters.
            prompt = self.prompt_templates.COURSE_PLANNING.substitute(
                topic=topic,
                difficulty=difficulty,
                audience_level=difficulty,
                duration="8 weeks",
                learning_style="interactive",
                industry_focus="general",
            )

            logger.debug(f"Sending course planning prompt: {prompt}")
            response = await self.llm.agenerate([prompt])
            # FIX: use the list-of-lists indexing (see _first_generation_text).
            course_plan = json.loads(self._first_generation_text(response))

            # NOTE(review): `metadata` is a reserved attribute on SQLAlchemy
            # declarative models — confirm models.py maps this column under a
            # different Python attribute name.
            new_course = Course(
                title=topic,
                description=course_plan.get("description"),
                difficulty_level=difficulty,
                content=course_plan,
                metadata={
                    "generator_version": "2.0",
                    "model": "gpt-4-turbo-preview",
                    "creation_parameters": {
                        "difficulty": difficulty,
                        "topic": topic,
                    },
                },
            )
            self.db_session.add(new_course)
            # FIX: flush (not commit) to obtain new_course.id, so the course
            # and the enrollment are committed in one atomic transaction
            # instead of the original two separate commits.
            self.db_session.flush()

            user_course = UserCourse(
                user_id=user_id,
                course_id=new_course.id,
                status="enrolled",
            )
            self.db_session.add(user_course)
            self.db_session.commit()

            # Index the plan so the retrieval QA chain can use it later.
            self.vector_store.add_texts(
                [json.dumps(course_plan)],
                metadatas=[{"type": "course_plan", "course_id": new_course.id}],
            )

            logger.info(f"Successfully created course: {new_course.id}")
            return new_course

        except Exception as e:
            logger.error(f"Error creating course: {str(e)}", exc_info=True)
            # FIX: roll back so the session remains usable after a failure.
            self.db_session.rollback()
            raise

    async def generate_module_content(self, module_id: int) -> Dict[str, Any]:
        """Generate, persist, and index detailed content for one module.

        Args:
            module_id: Primary key of the ``CourseModule`` to fill in.

        Returns:
            The parsed content dict produced by the LLM.

        Raises:
            ValueError: If no module exists with ``module_id``.
            json.JSONDecodeError: If the LLM reply is not valid JSON.
            Exception: Any LLM or database error, after rolling the
                session back.
        """
        logger.info(f"Generating content for module: {module_id}")

        try:
            module = self.db_session.query(CourseModule).get(module_id)
            if not module:
                raise ValueError(f"Module not found: {module_id}")

            prompt = self.prompt_templates.MODULE_CONTENT.substitute(
                title=module.title,
                objectives=json.dumps(module.content.get("objectives", [])),
                prerequisites=json.dumps(module.prerequisites),
                competency_level=module.course.difficulty_level,
                industry_context="general",
            )

            logger.debug(f"Sending module content prompt: {prompt}")
            response = await self.llm.agenerate([prompt])
            # FIX: use the list-of-lists indexing (see _first_generation_text).
            content = json.loads(self._first_generation_text(response))

            # FIX: reassign instead of mutating in place — in-place .update()
            # on a plain JSON column is invisible to SQLAlchemy change
            # tracking unless the model uses MutableDict, so the commit
            # could silently persist nothing.
            module.content = {**module.content, **content}
            self.db_session.commit()

            # Index the generated content for retrieval QA.
            self.vector_store.add_texts(
                [json.dumps(content)],
                metadatas=[{"type": "module_content", "module_id": module_id}],
            )

            logger.info(f"Successfully generated content for module: {module_id}")
            return content

        except Exception as e:
            logger.error(f"Error generating module content: {str(e)}", exc_info=True)
            # FIX: roll back so the session remains usable after a failure.
            self.db_session.rollback()
            raise

    async def answer_user_question(
        self,
        user_id: int,
        course_id: int,
        module_id: int,
        question: str
    ) -> str:
        """Answer a user question with course/module context and log it.

        Args:
            user_id: The asking user.
            course_id: Course the question belongs to.
            module_id: Module the user is currently in.
            question: The user's free-text question.

        Returns:
            The QA chain's answer string.

        Raises:
            ValueError: If the module does not exist or the user is not
                enrolled in the course.
            Exception: Any LLM or database error, after rolling the
                session back.
        """
        logger.info(f"Answering question for user {user_id} in course {course_id}")

        try:
            module = self.db_session.query(CourseModule).get(module_id)
            # FIX: guard missing rows — the original dereferenced a possible
            # None (AttributeError) for both module and enrollment.
            if not module:
                raise ValueError(f"Module not found: {module_id}")
            course = module.course

            user_course = self.db_session.query(UserCourse).filter_by(
                user_id=user_id,
                course_id=course_id,
            ).first()
            if not user_course:
                raise ValueError(f"User {user_id} is not enrolled in course {course_id}")

            # NOTE(review): get_completed_modules is not defined in this class
            # as shown — confirm it exists on a mixin or elsewhere in the file.
            prompt = self.prompt_templates.USER_QUESTION.substitute(
                topic=course.title,
                module_title=module.title,
                completed_modules=self.get_completed_modules(user_course.id),
                user_level=course.difficulty_level,
                question=question,
            )

            # The QA chain combines the prompt with retrieved course content
            # and the running conversation memory.
            response = await self.qa_chain.arun(prompt)

            # Persist the Q&A exchange for analytics/audit.
            interaction = UserInteraction(
                user_id=user_id,
                interaction_type="question_asked",
                content_reference=f"module_{module_id}",
                metadata={
                    "question": question,
                    "response": response,
                },
            )
            self.db_session.add(interaction)
            self.db_session.commit()

            logger.info(f"Successfully answered question for user {user_id}")
            return response

        except Exception as e:
            logger.error(f"Error answering question: {str(e)}", exc_info=True)
            # FIX: roll back so the session remains usable after a failure.
            self.db_session.rollback()
            raise