| """ | |
| Learnova AI - Educational Tutoring Platform Backend | |
| FastAPI + Gradio + Hugging Face Transformers | |
| """ | |
| import os | |
| import glob | |
| from pathlib import Path | |
| from typing import Dict, List, Optional | |
| from datetime import datetime | |
| import json | |
| from fastapi import FastAPI, HTTPException | |
| from fastapi.middleware.cors import CORSMiddleware | |
| from pydantic import BaseModel | |
| import gradio as gr | |
| from transformers import AutoTokenizer, AutoModelForCausalLM | |
| import torch | |
# Initialize FastAPI
app = FastAPI(
    title="Learnova AI Backend",
    description="AI-powered educational tutoring platform",
    version="1.0.0"
)

# CORS configuration
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
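# NOTE: the wildcard CORS policy above is a development convenience; in
# production you would typically restrict allow_origins to the actual
# frontend domain(s).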
# Global variables (in-memory; reset when the process restarts)
user_sessions: Dict[str, Dict] = {}
model = None
tokenizer = None

# Education levels
EDUCATION_LEVELS = ["primary", "junior_secondary", "senior_secondary", "undergraduate"]

# Level-specific prompts
LEVEL_PROMPTS = {
    "primary": """You are a friendly tutor for primary school students. Use simple words, fun examples,
and short stories. Avoid complex terms. Make learning fun and engaging.""",
    "junior_secondary": """You are a helpful tutor for junior secondary students. Use clear examples,
define any complex terms, and relate concepts to everyday life. Be encouraging and patient.""",
    "senior_secondary": """You are an experienced tutor for senior secondary students. Focus on exam concepts,
provide diagrams and visual aids when possible, and use real-world applications. Be thorough and precise.""",
    "undergraduate": """You are an academic tutor for undergraduate students. Include technical details,
academic references, and encourage critical analysis. Provide in-depth explanations and advanced concepts."""
}
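# Each prompt above is prepended verbatim to the student's question before it
# is handed to the model (see generate_ai_response below).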
# Pydantic models for the API
class StartSessionRequest(BaseModel):
    user_id: str
    education_level: str

class ChatRequest(BaseModel):
    user_id: str
    message: str

class ChatResponse(BaseModel):
    response: str
    has_syllabus_materials: bool
    available_materials: List[str]
    timestamp: str

class SessionResponse(BaseModel):
    user_id: str
    education_level: str
    session_started: str
    message: str

class SyllabusStatusResponse(BaseModel):
    levels: Dict[str, List[str]]
    total_materials: int
def load_ai_model():
    """Load the DialoGPT model and tokenizer (lazily, on first use)"""
    global model, tokenizer
    if model is None:
        print("Loading Microsoft DialoGPT-medium model...")
        try:
            tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
            model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
            # DialoGPT has no dedicated pad token; reuse EOS for padding
            if tokenizer.pad_token is None:
                tokenizer.pad_token = tokenizer.eos_token
            print("Model loaded successfully!")
        except Exception as e:
            print(f"Error loading model: {e}")
            print("Falling back to simple response generation...")
    return model, tokenizer
def load_syllabus_materials() -> Dict[str, List[str]]:
    """
    Scan the Syllabus folder structure and return available materials.
    Returns: Dict with education level as key and list of subject PDFs as value
    """
    syllabus_structure = {}
    base_path = Path("Syllabus")
    if not base_path.exists():
        os.makedirs(base_path, exist_ok=True)
        return syllabus_structure
    for level in EDUCATION_LEVELS:
        # e.g. "junior_secondary" -> folder name "Junior_Secondary"
        level_path = base_path / level.replace("_", " ").title().replace(" ", "_")
        materials = []
        if level_path.exists():
            pdf_files = list(level_path.glob("*.pdf"))
            materials = [pdf.stem for pdf in pdf_files]
        syllabus_structure[level] = materials
    return syllabus_structure
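# Expected on-disk layout (mirrors the upload instructions in the admin panel;
# the file names below are illustrative):
#   Syllabus/
#       Primary/Mathematics.pdf
#       Junior_Secondary/Basic_Science.pdf
#       Senior_Secondary/Physics.pdf
#       Undergraduate/Computer_Science.pdf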
def get_relevant_materials(level: str, question: str) -> List[str]:
    """
    Find relevant syllabus materials based on the question.
    Simple keyword matching for now.
    """
    syllabus = load_syllabus_materials()
    available = syllabus.get(level, [])
    if not available:
        return []
    question_lower = question.lower()
    relevant = []
    common_subjects = {
        "math": ["mathematics", "math", "algebra", "geometry", "calculus"],
        "science": ["science", "physics", "chemistry", "biology"],
        "english": ["english", "literature", "grammar", "writing"],
        "history": ["history", "social studies", "geography"],
        "computer": ["computer", "programming", "coding", "technology"]
    }
    for material in available:
        material_lower = material.lower()
        # Direct match: a word from the material's file name appears in the question
        if any(keyword in question_lower for keyword in material_lower.split()):
            relevant.append(material)
        else:
            # Indirect match: the material belongs to a known subject group and
            # the question mentions any keyword from that group
            for subject, keywords in common_subjects.items():
                if material_lower in keywords and any(kw in question_lower for kw in keywords):
                    relevant.append(material)
                    break
    return relevant
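# Matching example: with Syllabus/Primary/Mathematics.pdf on disk, the question
# "How do I solve algebra equations?" matches indirectly via the "math" subject
# group, because "mathematics" is one of its keywords and "algebra" appears in
# the question.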
def generate_ai_response(question: str, level: str, has_materials: bool, materials: List[str]) -> str:
    """
    Generate an AI response using DialoGPT with a level-appropriate explanation
    """
    global model, tokenizer
    if model is None or tokenizer is None:
        load_ai_model()
    level_prompt = LEVEL_PROMPTS.get(level, LEVEL_PROMPTS["junior_secondary"])
    if has_materials:
        materials_info = f"\n\nI have reference materials on: {', '.join(materials)}. "
    else:
        materials_info = "\n\nNote: I don't have specific syllabus materials for this topic. I recommend consulting your teacher for course-specific content. "
    context = f"{level_prompt}\n\nStudent question: {question}"
    try:
        if model is not None and tokenizer is not None:
            inputs = tokenizer.encode(context + tokenizer.eos_token, return_tensors="pt", max_length=512, truncation=True)
            with torch.no_grad():
                outputs = model.generate(
                    inputs,
                    max_length=inputs.shape[1] + 150,
                    num_return_sequences=1,
                    temperature=0.8,
                    top_p=0.9,
                    do_sample=True,
                    pad_token_id=tokenizer.pad_token_id,
                    eos_token_id=tokenizer.eos_token_id
                )
            # Decode only the newly generated tokens, skipping the prompt
            response = tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True)
            if response.strip():
                return response.strip() + materials_info
        # Model unavailable or produced an empty response: use the fallback
        response = generate_fallback_response(question, level)
        return response + materials_info
    except Exception as e:
        print(f"Error generating AI response: {e}")
        response = generate_fallback_response(question, level)
        return response + materials_info
def generate_fallback_response(question: str, level: str) -> str:
    """
    Generate a simple fallback response when the AI model fails
    """
    responses = {
        "primary": "That's a great question! Let me help you understand this in a simple way. ",
        "junior_secondary": "Good question! Let me explain this clearly. ",
        "senior_secondary": "Excellent question! Let's explore this concept. ",
        "undergraduate": "That's an interesting question. Let's analyze this systematically. "
    }
    base_response = responses.get(level, "That's a good question! ")
    question_lower = question.lower()
    if any(word in question_lower for word in ["what", "define", "explain"]):
        return base_response + "This topic requires detailed explanation. Please consult your course materials or teacher for comprehensive understanding."
    elif any(word in question_lower for word in ["how", "solve", "calculate"]):
        return base_response + "For this problem, I recommend working through it step by step with your teacher or using your course materials as reference."
    else:
        return base_response + "I'm here to help guide your learning. For the most accurate and course-specific information, please refer to your syllabus materials."
# FastAPI Endpoints
# The Gradio admin panel is mounted at "/" (see the bottom of this file), so
# the JSON welcome message is served from /api instead.
@app.get("/api")
def read_root():
    """Root API endpoint"""
    return {
        "message": "Welcome to Learnova AI Backend",
        "version": "1.0.0",
        "endpoints": ["/api/start-session", "/api/chat", "/api/syllabus-status", "/api/sessions"]
    }
@app.post("/api/start-session", response_model=SessionResponse)
def start_session(request: StartSessionRequest):
    """
    Start a new tutoring session for a user
    """
    if request.education_level.lower() not in EDUCATION_LEVELS:
        raise HTTPException(
            status_code=400,
            detail=f"Invalid education level. Must be one of: {', '.join(EDUCATION_LEVELS)}"
        )
    user_sessions[request.user_id] = {
        "education_level": request.education_level.lower(),
        "started_at": datetime.now().isoformat(),
        "chat_history": []
    }
    return SessionResponse(
        user_id=request.user_id,
        education_level=request.education_level.lower(),
        session_started=user_sessions[request.user_id]["started_at"],
        message=f"Session started for {request.education_level} level. Ready to help you learn!"
    )
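# Example request (hypothetical values, against the local server):
#   curl -X POST http://localhost:7860/api/start-session \
#     -H "Content-Type: application/json" \
#     -d '{"user_id": "student123", "education_level": "primary"}'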
@app.post("/api/chat", response_model=ChatResponse)
def chat(request: ChatRequest):
    """
    Process a chat message and return an AI-generated response
    """
    if request.user_id not in user_sessions:
        raise HTTPException(
            status_code=404,
            detail="Session not found. Please start a session first using /api/start-session"
        )
    session = user_sessions[request.user_id]
    level = session["education_level"]
    relevant_materials = get_relevant_materials(level, request.message)
    has_materials = len(relevant_materials) > 0
    ai_response = generate_ai_response(
        request.message,
        level,
        has_materials,
        relevant_materials
    )
    chat_entry = {
        "timestamp": datetime.now().isoformat(),
        "user_message": request.message,
        "ai_response": ai_response,
        "materials_referenced": relevant_materials
    }
    session["chat_history"].append(chat_entry)
    return ChatResponse(
        response=ai_response,
        has_syllabus_materials=has_materials,
        available_materials=relevant_materials,
        timestamp=chat_entry["timestamp"]
    )
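# Example request (hypothetical values; the session above must exist first):
#   curl -X POST http://localhost:7860/api/chat \
#     -H "Content-Type: application/json" \
#     -d '{"user_id": "student123", "message": "What is photosynthesis?"}'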
@app.get("/api/syllabus-status", response_model=SyllabusStatusResponse)
def get_syllabus_status():
    """
    Check available syllabus materials across all levels
    """
    syllabus = load_syllabus_materials()
    total = sum(len(materials) for materials in syllabus.values())
    return SyllabusStatusResponse(
        levels=syllabus,
        total_materials=total
    )
@app.get("/api/sessions")
def get_active_sessions():
    """
    Get all active sessions (admin/debug endpoint)
    """
    return {
        "active_sessions": len(user_sessions),
        "sessions": {
            user_id: {
                "level": session["education_level"],
                "started": session["started_at"],
                "messages": len(session["chat_history"])
            }
            for user_id, session in user_sessions.items()
        }
    }
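# Sessions live in the in-process user_sessions dict, so they are lost on
# restart; a persistent store (e.g., Redis or a database) would be needed for
# sessions to survive redeploys.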
# Gradio Interface
def gradio_start_session(user_id: str, education_level: str):
    """Gradio function to start a session"""
    try:
        response = start_session(StartSessionRequest(user_id=user_id, education_level=education_level))
        return f"✅ {response.message}"
    except HTTPException as e:
        return f"❌ Error: {e.detail}"

def gradio_chat(user_id: str, message: str):
    """Gradio function for chat"""
    try:
        response = chat(ChatRequest(user_id=user_id, message=message))
        materials_info = ""
        if response.has_syllabus_materials:
            materials_info = f"\n\n📚 Reference materials: {', '.join(response.available_materials)}"
        return f"{response.response}{materials_info}"
    except HTTPException as e:
        return f"❌ Error: {e.detail}"

def gradio_syllabus_status():
    """Gradio function to check syllabus status"""
    response = get_syllabus_status()
    status = f"📊 Total Materials: {response.total_materials}\n\n"
    for level, materials in response.levels.items():
        status += f"**{level.replace('_', ' ').title()}**: {len(materials)} materials\n"
        if materials:
            status += f"  - {', '.join(materials)}\n"
    return status
# Create Gradio interface
with gr.Blocks(title="Learnova AI Admin Panel", theme=gr.themes.Soft()) as gradio_app:
    gr.Markdown("# 🎓 Learnova AI - Admin Panel")
    gr.Markdown("Test and manage the Learnova AI tutoring system")

    with gr.Tab("Start Session"):
        gr.Markdown("### Start a New Tutoring Session")
        with gr.Row():
            user_id_input = gr.Textbox(label="User ID", placeholder="Enter user ID (e.g., student123)")
            level_input = gr.Dropdown(
                choices=["primary", "junior_secondary", "senior_secondary", "undergraduate"],
                label="Education Level"
            )
        start_btn = gr.Button("Start Session", variant="primary")
        start_output = gr.Textbox(label="Result", lines=2)
        start_btn.click(gradio_start_session, inputs=[user_id_input, level_input], outputs=start_output)

    with gr.Tab("Chat"):
        gr.Markdown("### Chat with AI Tutor")
        chat_user_id = gr.Textbox(label="User ID", placeholder="Enter your user ID")
        chat_message = gr.Textbox(label="Your Question", placeholder="Ask a question...", lines=3)
        chat_btn = gr.Button("Send", variant="primary")
        chat_output = gr.Textbox(label="AI Response", lines=8)
        chat_btn.click(gradio_chat, inputs=[chat_user_id, chat_message], outputs=chat_output)

    with gr.Tab("Syllabus Status"):
        gr.Markdown("### Check Available Syllabus Materials")
        status_btn = gr.Button("Refresh Status", variant="primary")
        status_output = gr.Markdown()
        status_btn.click(gradio_syllabus_status, outputs=status_output)
        gr.Markdown("""
        ### 📤 Upload Materials
        To add syllabus materials:
        1. Create PDF files for your subjects
        2. Upload them to the appropriate folder:
           - `Syllabus/Primary/` for primary level
           - `Syllabus/Junior_Secondary/` for junior secondary
           - `Syllabus/Senior_Secondary/` for senior secondary
           - `Syllabus/Undergraduate/` for undergraduate
        3. Refresh the status to see updated materials
        """)
| with gr.Tab("API Documentation"): | |
| gr.Markdown(""" | |
| ### π API Endpoints | |
| **1. Start Session** | |
| ``` | |
| POST /api/start-session | |
| { | |
| "user_id": "string", | |
| "education_level": "primary|junior_secondary|senior_secondary|undergraduate" | |
| } | |
| ``` | |
| **2. Chat** | |
| ``` | |
| POST /api/chat | |
| { | |
| "user_id": "string", | |
| "message": "string" | |
| } | |
| ``` | |
| **3. Syllabus Status** | |
| ``` | |
| GET /api/syllabus-status | |
| ``` | |
| **4. Active Sessions** | |
| ``` | |
| GET /api/sessions | |
| ``` | |
| """) | |
# Mount the Gradio admin panel at the root path; the /api/* routes above are
# registered first, so they take precedence over the mount
app = gr.mount_gradio_app(app, gradio_app, path="/")
if __name__ == "__main__":
    import uvicorn
    print("🚀 Initializing Learnova AI Backend...")
    print("🤖 Loading AI model...")
    load_ai_model()
    print("✅ Ready!")
    uvicorn.run(app, host="0.0.0.0", port=7860)