#!/usr/bin/env python3
"""
Unified runner that works on any hardware
Automatically adapts to available resources
"""
import sys
import argparse
from config import get_config
def run_streamlit_app():
    """Launch the standard Streamlit UI (hands control to Streamlit; never returns)."""
    import streamlit.web.cli as stcli

    # Streamlit's CLI reads its command from sys.argv, so fake the
    # equivalent of running `streamlit run ui.py` before handing off.
    sys.argv = ["streamlit", "run", "ui.py"]
    raise SystemExit(stcli.main())
def run_agent_mode():
    """Launch the autonomous agent, seeded with a demo student profile."""
    from agentic_optimizer import LocalAgentRunner, StudentProfile

    print("Starting Agentic Mode...")
    agent = LocalAgentRunner("neu_graph_analyzed_clean.pkl")

    # Demo: register a sample student so the agent has someone to track.
    demo_profile = StudentProfile(
        student_id="demo",
        completed_courses=["CS1800", "CS2500"],
        current_gpa=3.5,
        interests=["AI", "Machine Learning"],
        career_goals="ML Engineer",
        learning_style="Visual",
        time_commitment=40,
        preferred_difficulty="moderate",
    )
    tracked_id = agent.add_student(demo_profile)
    print(f"Tracking student: {tracked_id}")

    # Hand control to the agent's main loop.
    agent.start_agent()
def run_api_server(graph_path="neu_graph_analyzed_clean.pkl", host="0.0.0.0", port=8000):
    """Run the optimizer as a REST API server (blocks until stopped).

    Args:
        graph_path: Pickled course-graph file to load. Defaults to the
            bundled NEU graph, matching the previous hard-coded path.
        host: Interface to bind. "0.0.0.0" exposes the server on all
            interfaces -- only use on a trusted network.
        port: TCP port for uvicorn to listen on.
    """
    from fastapi import FastAPI
    from pydantic import BaseModel
    import uvicorn
    import pickle

    from curriculum_optimizer import HybridOptimizer, StudentProfile

    app = FastAPI(title="Curriculum Optimizer API")

    # Load models and graph once at startup; shared by all requests.
    optimizer = HybridOptimizer()
    optimizer.load_models()
    # SECURITY NOTE: pickle.load executes arbitrary code from the file --
    # only point graph_path at files you trust.
    with open(graph_path, 'rb') as f:
        graph = pickle.load(f)
    optimizer.load_data(graph)

    class PlanRequest(BaseModel):
        # Request schema; defaults mirror the demo profile used elsewhere.
        completed_courses: list
        gpa: float = 3.5
        interests: list
        career_goals: str
        learning_style: str = "Visual"
        time_commitment: int = 40
        preferred_difficulty: str = "moderate"

    @app.post("/generate_plan")
    async def generate_plan(request: PlanRequest):
        """Build a StudentProfile from the request and return a generated plan."""
        profile = StudentProfile(
            completed_courses=request.completed_courses,
            current_gpa=request.gpa,
            interests=request.interests,
            career_goals=request.career_goals,
            learning_style=request.learning_style,
            time_commitment=request.time_commitment,
            preferred_difficulty=request.preferred_difficulty
        )
        plan = optimizer.generate_plan(profile)
        return plan

    @app.get("/health")
    async def health():
        """Liveness probe; also reports which device the optimizer runs on."""
        return {"status": "healthy", "device": str(optimizer.device)}

    # With the default port these messages match the previous output exactly.
    print(f"Starting API server on http://localhost:{port}")
    print(f"API docs at http://localhost:{port}/docs")
    uvicorn.run(app, host=host, port=port)
def test_hardware():
    """Print a report of available hardware, the auto-selected config,
    and a recommended run mode. Output-only; returns None."""
    import torch

    print("=" * 60)
    print("HARDWARE TEST")
    print("=" * 60)

    cuda_available = torch.cuda.is_available()
    if cuda_available:
        # Hoisted: the original queried get_device_properties(0) three times.
        props = torch.cuda.get_device_properties(0)
        print(f"✓ CUDA available")
        print(f" Device: {torch.cuda.get_device_name(0)}")
        print(f" Memory: {props.total_memory / 1e9:.1f}GB")
        print(f" Compute: {props.major}.{props.minor}")
    else:
        print("✗ No CUDA (CPU only)")

    # Call get_config() once and reuse (the original called it twice).
    config = get_config()
    print(f"\nSelected Config: {config.__name__}")
    print(f" LLM: {config.LLM_MODEL or 'None (embeddings only)'}")
    print(f" Embedder: {config.EMBEDDING_MODEL}")
    print(f" Quantization: {config.QUANTIZATION or 'None'}")

    print("\nRecommended mode based on hardware:")
    # >10 GB of GPU memory is treated as enough for the full-featured modes.
    if cuda_available and torch.cuda.get_device_properties(0).total_memory > 10e9:
        print(" → Use 'streamlit' or 'agent' mode (full features)")
    else:
        print(" → Use 'api' mode (lightweight)")
def main():
    """Parse CLI arguments and dispatch to the selected run mode."""
    parser = argparse.ArgumentParser(description="Curriculum Optimizer Runner")
    parser.add_argument(
        "mode",
        choices=["streamlit", "agent", "api", "test"],
        help="Run mode: streamlit (UI), agent (autonomous), api (REST server), test (hardware test)"
    )
    parser.add_argument(
        "--config",
        choices=["h200", "colab", "local", "cpu", "minimal"],
        help="Force specific configuration"
    )
    args = parser.parse_args()

    # Honour an explicit configuration override before any mode loads it.
    if args.config:
        import os
        os.environ["CURRICULUM_CONFIG"] = args.config

    # Dispatch table instead of an if/elif chain; argparse has already
    # guaranteed args.mode is one of these keys.
    handlers = {
        "streamlit": run_streamlit_app,
        "agent": run_agent_mode,
        "api": run_api_server,
        "test": test_hardware,
    }
    handlers[args.mode]()
if __name__ == "__main__":
    # Fix: removed a stray trailing "|" artifact after main() that made
    # this line a syntax error in the checked-in copy.
    if len(sys.argv) == 1:
        # No arguments: run the hardware test and print usage instead of
        # letting argparse error out on the missing positional.
        test_hardware()
        print("\nUsage: python run.py [streamlit|agent|api|test]")
    else:
        main()