Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -325,5 +325,116 @@ with gr.Blocks() as app:
|
|
| 325 |
book, book_reason, character, character_reason, blog_text,
|
| 326 |
transcript_data, learning_output],
|
| 327 |
outputs=output_summary)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 328 |
|
| 329 |
app.launch()
|
|
|
|
| 325 |
book, book_reason, character, character_reason, blog_text,
|
| 326 |
transcript_data, learning_output],
|
| 327 |
outputs=output_summary)
|
| 328 |
+
# Add these new imports at the top
import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from openai import OpenAI  # Make sure to install with pip install openai

# ========== AI CHATBOT SETUP ==========
# DeepSeek LLM used for profile-grounded information retrieval.
# NOTE(review): eagerly loading a 7B-parameter model at import time needs a
# large amount of RAM/VRAM -- the Space's "Runtime error" is very plausibly an
# out-of-memory here; confirm the hardware tier before deploying.
deepseek_model_name = "deepseek-ai/deepseek-llm-7b"
deepseek_tokenizer = AutoTokenizer.from_pretrained(deepseek_model_name)
deepseek_model = AutoModelForCausalLM.from_pretrained(
    deepseek_model_name, torch_dtype=torch.float16
)

# OpenAI client used to phrase the final conversational reply.
# FIX: read the key from the environment instead of the original hard-coded
# "your-openai-api-key" placeholder -- committing a literal key both leaks the
# secret and, with the placeholder, makes every API call fail at runtime.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
|
| 341 |
+
|
| 342 |
+
def retrieve_information_with_deepseek(query, student_profile):
    """Answer *query* with the DeepSeek model, grounded in the student profile.

    Parameters
    ----------
    query : str
        The user's question.
    student_profile : dict | None
        Parsed student profile; missing keys (and a ``None`` profile) fall
        back to ``'N/A'`` placeholders.

    Returns
    -------
    str
        Only the newly generated answer text (the prompt is stripped).
    """
    # Robustness: tolerate a missing/None profile instead of raising.
    profile = student_profile or {}
    transcript = profile.get('transcript', {})

    # Prepare context from student profile
    profile_context = f"""
    Student Profile:
    Name: {profile.get('name', 'N/A')}
    Age: {profile.get('age', 'N/A')}
    Grade Level: {transcript.get('grade_level', 'N/A')}
    GPA: {transcript.get('gpa', {}).get('unweighted', 'N/A')} (Unweighted)
    Learning Style: {profile.get('learning_style', 'N/A')}
    Interests: {profile.get('interests', 'N/A')}
    """

    # Format the prompt for DeepSeek
    prompt = f"""
    [CONTEXT]
    {profile_context}

    [QUERY]
    {query}

    Based on the student profile and educational context, provide the most accurate and relevant information to answer the query.
    """

    inputs = deepseek_tokenizer(prompt, return_tensors="pt")
    # Inference only: no_grad avoids building an autograd graph (saves memory).
    with torch.no_grad():
        outputs = deepseek_model.generate(**inputs, max_new_tokens=200)

    # BUG FIX: generate() returns prompt tokens followed by the completion, so
    # decoding outputs[0] wholesale echoed the entire prompt back to the user.
    # Decode only the tokens generated after the prompt.
    prompt_len = inputs["input_ids"].shape[1]
    accurate_response = deepseek_tokenizer.decode(
        outputs[0][prompt_len:], skip_special_tokens=True
    )

    return accurate_response
|
| 371 |
+
|
| 372 |
+
def generate_chat_response_with_chatgpt(message, history, student_profile):
    """One chat turn: retrieve facts via DeepSeek, phrase them via ChatGPT.

    Parameters
    ----------
    message : str
        The new user message.
    history : list | None
        Prior turns. Gradio supplies either ``(user, bot)`` tuples or, in
        newer versions, openai-style ``{"role", "content"}`` dicts -- both
        are accepted (the original crashed with ``h[0]`` on dict history).
    student_profile : dict | None
        Student profile forwarded to the retrieval step.

    Returns
    -------
    str
        The assistant's reply text.
    """
    # First retrieve accurate information with DeepSeek
    accurate_info = retrieve_information_with_deepseek(message, student_profile)

    # Prepare conversation history; tolerate None and both history formats.
    turns = []
    for h in history or []:
        if isinstance(h, dict):
            speaker = "User" if h.get("role") == "user" else "AI"
            turns.append(f"{speaker}: {h.get('content', '')}")
        else:
            turns.append(f"User: {h[0]}\nAI: {h[1]}")
    chat_history = "\n".join(turns)

    # Create ChatGPT prompt
    prompt = f"""
    You are a personalized teaching assistant. Use the following accurate information to craft a natural, helpful response:

    [ACCURATE INFORMATION]
    {accurate_info}

    [CONVERSATION HISTORY]
    {chat_history}

    [NEW MESSAGE]
    User: {message}

    Respond in a friendly, conversational tone while ensuring all factual information remains accurate.
    """

    # Get response from ChatGPT
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "You are a helpful teaching assistant."},
            {"role": "user", "content": prompt},
        ],
        temperature=0.7,
    )

    return response.choices[0].message.content
|
| 406 |
+
|
| 407 |
+
# ========== UPDATE GRADIO INTERFACE ==========
# Add this new tab to your existing with gr.Blocks() as app:
with gr.Blocks() as app:
    # ... (keep all your existing tabs) ...

    with gr.Tab("🤖 AI Teaching Assistant"):
        gr.Markdown("## Your Personalized Learning Assistant")
        gr.Markdown("Chat with your AI assistant for personalized learning support")

        # Session-scoped holder for the loaded student profile dict.
        profile_state = gr.State({})

        # BUG FIX: the original lambda passed ``student_profile=gr.State()``,
        # i.e. a brand-new empty State component constructed on every call, so
        # the chatbot never saw the profile loaded below.  Wire the real state
        # in through additional_inputs instead; ChatInterface then calls
        # generate_chat_response_with_chatgpt(message, history, profile).
        chatbot = gr.ChatInterface(
            fn=generate_chat_response_with_chatgpt,
            additional_inputs=[profile_state],
            examples=[
                ["How should I study for my math test?"],
                ["Can you explain this concept to me in a way that matches my learning style?"],
                ["What are some good study strategies based on my GPA?"],
                ["How can I improve my grades in science?"],
            ],
        )

    # This connects the profile data to the chatbot
    def load_profile():
        """Return the saved student profile dict, or {} when none exists."""
        profile_path = os.path.join("student_profiles", "student_profile.json")
        if os.path.exists(profile_path):
            with open(profile_path, "r") as f:
                return json.load(f)
        return {}

    # BUG FIX: the original bare ``@app.load`` decorator declared no outputs,
    # so load_profile()'s return value was discarded.  Push it into
    # profile_state when the page loads so the chatbot actually uses it.
    app.load(load_profile, inputs=None, outputs=profile_state)

app.launch()
|