IW2025 committed on
Commit
32ddee3
·
verified ·
1 Parent(s): 00a2c28

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -41
app.py CHANGED
@@ -83,26 +83,6 @@ class LLMCurriculumAssistant:
83
  api_key=os.environ.get("ANTHROPIC_KEY")
84
  )
85
 
86
- # Create LLM wrapper for LangChain compatibility
87
- class ClaudeLLM:
88
- def __init__(self, client):
89
- self.client = client
90
-
91
- def __call__(self, prompt):
92
- try:
93
- response = self.client.messages.create(
94
- model="claude-3-5-haiku-20241022",
95
- max_tokens=1500,
96
- temperature=0.7,
97
- messages=[{"role": "user", "content": prompt}]
98
- )
99
- return response.content[0].text
100
- except Exception as e:
101
- print(f"Error calling Claude: {e}")
102
- return "I'm sorry, I couldn't generate a response at the moment."
103
-
104
- self.llm = ClaudeLLM(self.anthropic_client)
105
-
106
  # Create content selection prompt
107
  content_selection_template = """Hi! I'm helping a student find the best curriculum slide for their question.
108
 
@@ -120,12 +100,9 @@ Just respond with the slide number (1, 2, 3, etc.) that you think is most helpfu
120
 
121
  Thanks! Slide number:"""
122
 
123
- self.content_selection_chain = LLMChain(
124
- llm=self.llm,
125
- prompt=PromptTemplate(
126
- input_variables=["question", "slide_contents"],
127
- template=content_selection_template
128
- )
129
  )
130
 
131
  # Create answer generation prompt
@@ -146,21 +123,18 @@ Could you help me explain this to them in a friendly, educational way? I'd like
146
 
147
  Thanks for your help! Here's what I'd tell the student:"""
148
 
149
- self.answer_chain = LLMChain(
150
- llm=self.llm,
151
- prompt=PromptTemplate(
152
- input_variables=["question", "slide_content"],
153
- template=answer_template
154
- )
155
  )
156
 
157
  print("✅ LLM setup successful!")
158
 
159
  except Exception as e:
160
  print(f"❌ Error setting up LLM: {e}")
161
- self.llm = None
162
- self.content_selection_chain = None
163
- self.answer_chain = None
164
 
165
  def get_pdf_page_image(self, pdf_path, page_num):
166
  """Get PDF page as image"""
@@ -198,7 +172,7 @@ Thanks for your help! Here's what I'd tell the student:"""
198
  selected_content = None
199
  selected_result = None
200
 
201
- if self.content_selection_chain:
202
  try:
203
  # Prepare slide contents for LLM analysis
204
  slide_contents = []
@@ -212,12 +186,21 @@ Thanks for your help! Here's what I'd tell the student:"""
212
 
213
  print("🤖 Using LLM to select most relevant content...")
214
 
215
- # Get LLM's selection
216
- selection_response = self.content_selection_chain.run(
217
  question=query,
218
  slide_contents=slide_contents_text
219
  )
220
 
 
 
 
 
 
 
 
 
 
221
  print(f"LLM Selection Response: {selection_response}")
222
 
223
  # Parse the selection
@@ -254,14 +237,25 @@ Thanks for your help! Here's what I'd tell the student:"""
254
 
255
  # Step 3: LLM answer generation
256
  answer = ""
257
- if self.answer_chain and selected_content:
258
  try:
259
  print("🤖 Generating LLM answer...")
260
- answer = self.answer_chain.run(
 
 
261
  question=query,
262
  slide_content=selected_content
263
  )
264
- answer = answer.strip()
 
 
 
 
 
 
 
 
 
265
  print(f"✅ LLM answer generated: {answer[:100]}...")
266
 
267
  except Exception as e:
 
83
  api_key=os.environ.get("ANTHROPIC_KEY")
84
  )
85
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86
  # Create content selection prompt
87
  content_selection_template = """Hi! I'm helping a student find the best curriculum slide for their question.
88
 
 
100
 
101
  Thanks! Slide number:"""
102
 
103
+ self.content_selection_prompt = PromptTemplate(
104
+ input_variables=["question", "slide_contents"],
105
+ template=content_selection_template
 
 
 
106
  )
107
 
108
  # Create answer generation prompt
 
123
 
124
  Thanks for your help! Here's what I'd tell the student:"""
125
 
126
+ self.answer_prompt = PromptTemplate(
127
+ input_variables=["question", "slide_content"],
128
+ template=answer_template
 
 
 
129
  )
130
 
131
  print("✅ LLM setup successful!")
132
 
133
  except Exception as e:
134
  print(f"❌ Error setting up LLM: {e}")
135
+ self.anthropic_client = None
136
+ self.content_selection_prompt = None
137
+ self.answer_prompt = None
138
 
139
  def get_pdf_page_image(self, pdf_path, page_num):
140
  """Get PDF page as image"""
 
172
  selected_content = None
173
  selected_result = None
174
 
175
+ if self.anthropic_client and self.content_selection_prompt:
176
  try:
177
  # Prepare slide contents for LLM analysis
178
  slide_contents = []
 
186
 
187
  print("🤖 Using LLM to select most relevant content...")
188
 
189
+ # Format the prompt
190
+ prompt = self.content_selection_prompt.format(
191
  question=query,
192
  slide_contents=slide_contents_text
193
  )
194
 
195
+ # Get LLM's selection
196
+ response = self.anthropic_client.messages.create(
197
+ model="claude-3-5-haiku-20241022",
198
+ max_tokens=1500,
199
+ temperature=0.7,
200
+ messages=[{"role": "user", "content": prompt}]
201
+ )
202
+
203
+ selection_response = response.content[0].text
204
  print(f"LLM Selection Response: {selection_response}")
205
 
206
  # Parse the selection
 
237
 
238
  # Step 3: LLM answer generation
239
  answer = ""
240
+ if self.anthropic_client and self.answer_prompt and selected_content:
241
  try:
242
  print("🤖 Generating LLM answer...")
243
+
244
+ # Format the prompt
245
+ prompt = self.answer_prompt.format(
246
  question=query,
247
  slide_content=selected_content
248
  )
249
+
250
+ # Get LLM's answer
251
+ response = self.anthropic_client.messages.create(
252
+ model="claude-3-5-haiku-20241022",
253
+ max_tokens=1500,
254
+ temperature=0.7,
255
+ messages=[{"role": "user", "content": prompt}]
256
+ )
257
+
258
+ answer = response.content[0].text.strip()
259
  print(f"✅ LLM answer generated: {answer[:100]}...")
260
 
261
  except Exception as e: