Spaces:
Running
Running
| #!/usr/bin/env python3 | |
| """ | |
| Unified runner that works on any hardware | |
| Automatically adapts to available resources | |
| """ | |
| import sys | |
| import argparse | |
| from config import get_config | |
def run_streamlit_app():
    """Launch the standard Streamlit UI.

    Rewrites ``sys.argv`` so the Streamlit CLI behaves as if invoked with
    ``streamlit run ui.py``, then exits the process with the CLI's status.
    """
    import streamlit.web.cli as stcli

    cli_args = ["streamlit", "run", "ui.py"]
    sys.argv = cli_args
    sys.exit(stcli.main())
def run_agent_mode():
    """Start the autonomous agent with a hard-coded demo student.

    Loads the analyzed course graph, registers one demo profile so the
    agent has something to track, then hands control to the agent loop.
    """
    from agentic_optimizer import LocalAgentRunner, StudentProfile

    print("Starting Agentic Mode...")
    agent = LocalAgentRunner("neu_graph_analyzed_clean.pkl")

    # Demo: register a test student so the agent has state to work with.
    demo_profile = StudentProfile(
        student_id="demo",
        completed_courses=["CS1800", "CS2500"],
        current_gpa=3.5,
        interests=["AI", "Machine Learning"],
        career_goals="ML Engineer",
        learning_style="Visual",
        time_commitment=40,
        preferred_difficulty="moderate",
    )
    tracked_id = agent.add_student(demo_profile)
    print(f"Tracking student: {tracked_id}")

    # Blocks here: the agent loop runs until interrupted.
    agent.start_agent()
def run_api_server():
    """Run as a REST API server.

    Loads the optimizer and course graph once at startup, then serves:
      POST /plan   -- generate a curriculum plan from a student profile
      GET  /health -- liveness check reporting the compute device

    Bug fix: the original defined ``generate_plan`` and ``health`` but never
    registered them with FastAPI, so the server exposed no endpoints. The
    ``@app.post``/``@app.get`` decorators below register the routes.
    """
    from fastapi import FastAPI
    from pydantic import BaseModel
    import uvicorn
    import pickle

    # Load optimizer (project-local module).
    from curriculum_optimizer import HybridOptimizer, StudentProfile

    app = FastAPI(title="Curriculum Optimizer API")

    # Load model once at startup so individual requests don't pay the cost.
    optimizer = HybridOptimizer()
    optimizer.load_models()
    # NOTE(review): pickle.load is unsafe on untrusted data; this path is
    # assumed to be a trusted local artifact shipped with the app.
    with open("neu_graph_analyzed_clean.pkl", 'rb') as f:
        graph = pickle.load(f)
    optimizer.load_data(graph)

    class PlanRequest(BaseModel):
        # Request schema; defaults mirror the demo student profile.
        completed_courses: list
        gpa: float = 3.5
        interests: list
        career_goals: str
        learning_style: str = "Visual"
        time_commitment: int = 40
        preferred_difficulty: str = "moderate"

    @app.post("/plan")
    async def generate_plan(request: PlanRequest):
        """Build a StudentProfile from the request and return a plan."""
        profile = StudentProfile(
            completed_courses=request.completed_courses,
            current_gpa=request.gpa,
            interests=request.interests,
            career_goals=request.career_goals,
            learning_style=request.learning_style,
            time_commitment=request.time_commitment,
            preferred_difficulty=request.preferred_difficulty
        )
        plan = optimizer.generate_plan(profile)
        return plan

    @app.get("/health")
    async def health():
        """Liveness probe; reports which device the optimizer runs on."""
        return {"status": "healthy", "device": str(optimizer.device)}

    print("Starting API server on http://localhost:8000")
    print("API docs at http://localhost:8000/docs")
    uvicorn.run(app, host="0.0.0.0", port=8000)
def test_hardware():
    """Print a report of available hardware and the selected configuration.

    Improvements over the original: ``torch.cuda.get_device_properties(0)``
    was queried four times and ``get_config()`` twice; both are now fetched
    once and reused. Output is unchanged.
    """
    import torch

    print("=" * 60)
    print("HARDWARE TEST")
    print("=" * 60)

    cuda_available = torch.cuda.is_available()
    # Hoist the device-properties lookup; None when there is no GPU.
    props = torch.cuda.get_device_properties(0) if cuda_available else None

    if cuda_available:
        print("✓ CUDA available")
        print(f" Device: {torch.cuda.get_device_name(0)}")
        print(f" Memory: {props.total_memory / 1e9:.1f}GB")
        print(f" Compute: {props.major}.{props.minor}")
    else:
        print("✗ No CUDA (CPU only)")

    # Fetch the config once instead of calling get_config() twice.
    config = get_config()
    print(f"\nSelected Config: {config.__name__}")
    print(f" LLM: {config.LLM_MODEL or 'None (embeddings only)'}")
    print(f" Embedder: {config.EMBEDDING_MODEL}")
    print(f" Quantization: {config.QUANTIZATION or 'None'}")

    print("\nRecommended mode based on hardware:")
    # Full-feature modes need a GPU with more than ~10 GB of memory.
    if props is not None and props.total_memory > 10e9:
        print(" → Use 'streamlit' or 'agent' mode (full features)")
    else:
        print(" → Use 'api' mode (lightweight)")
def main():
    """Parse command-line arguments and dispatch to the chosen run mode."""
    parser = argparse.ArgumentParser(description="Curriculum Optimizer Runner")
    parser.add_argument(
        "mode",
        choices=["streamlit", "agent", "api", "test"],
        help="Run mode: streamlit (UI), agent (autonomous), api (REST server), test (hardware test)"
    )
    parser.add_argument(
        "--config",
        choices=["h200", "colab", "local", "cpu", "minimal"],
        help="Force specific configuration"
    )
    args = parser.parse_args()

    # Forcing a configuration is communicated via the environment so that
    # config.get_config() picks it up regardless of import order.
    if args.config:
        import os
        os.environ["CURRICULUM_CONFIG"] = args.config

    # Dispatch table replaces the if/elif chain; argparse's `choices`
    # guarantees args.mode is one of these keys.
    runners = {
        "streamlit": run_streamlit_app,
        "agent": run_agent_mode,
        "api": run_api_server,
        "test": test_hardware,
    }
    runners[args.mode]()
if __name__ == "__main__":
    if len(sys.argv) > 1:
        main()
    else:
        # No arguments: show the hardware report plus a usage hint.
        test_hardware()
        print("\nUsage: python run.py [streamlit|agent|api|test]")