# Student Score Predictor Chatbot + Groq — FastAPI application.
# (Removed non-Python page-scrape residue: Spaces status lines, file size,
# and a line-number gutter that made the file unparseable.)
import os
import pickle
import pandas as pd
from fastapi import FastAPI, Request, HTTPException
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.templating import Jinja2Templates
from fastapi.middleware.cors import CORSMiddleware
from groq import Groq
app = FastAPI(title="Student Score Predictor Chatbot + Groq")
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"], allow_credentials=True,
    allow_methods=["*"], allow_headers=["*"],
)
templates = Jinja2Templates(directory="templates")

# --- Load model at startup --------------------------------------------------
MODEL_PATH = os.getenv('MODEL_PATH', 'student_performance_model.pkl')
try:
    with open(MODEL_PATH, 'rb') as f:
        # NOTE(review): pickle.load executes arbitrary code — only load model
        # files from a trusted source.
        model = pickle.load(f)
except Exception as e:
    # Fail fast at import time with a clear message; chain the original cause.
    raise RuntimeError(f"Could not load model: {e}") from e

# --- Load Groq API key ------------------------------------------------------
# SECURITY FIX: the key was hard-coded in source (a committed secret), which
# also made the guard below dead code. Read it from the environment instead,
# as the error message already instructs.
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
if not GROQ_API_KEY:
    raise RuntimeError("Missing Groq API key. Set env var GROQ_API_KEY.")
groq_client = Groq(api_key=GROQ_API_KEY)
# --- Chat-fields configuration ----------------------------------------------
# Ordered questionnaire driving both the chat UI and /predict_json validation.
# Each entry: name (payload key / model feature), type ("number" | "select"),
# question (user-facing prompt), and either numeric bounds ("validation")
# or the allowed choices ("options").
# FIX: repaired mojibake in the question strings ("1β10" -> "1-10", etc.).
FIELDS = [
    {'name': 'Age', 'type': 'number',
     'question': 'What is your age?',
     'validation': {'min': 5, 'max': 100}},
    {'name': 'Gender', 'type': 'select',
     'question': 'What is your gender?',
     'options': ['Male', 'Female', 'Other']},
    {'name': 'HoursOfStudyPerDay', 'type': 'number',
     'question': 'Hours of study per day?',
     'validation': {'min': 0, 'max': 24}},
    {'name': 'SchoolAttendanceRate', 'type': 'number',
     'question': 'School attendance rate (%)?',
     'validation': {'min': 0, 'max': 100}},
    {'name': 'TuitionAccess', 'type': 'select',
     'question': 'Access to extra tuition?',
     'options': ['Yes', 'No']},
    {'name': 'AveragePreviousScores', 'type': 'number',
     'question': 'Average previous score?',
     'validation': {'min': 0, 'max': 100}},
    {'name': 'HoursOfSleep', 'type': 'number',
     'question': 'Hours of sleep per night?',
     'validation': {'min': 0, 'max': 24}},
    {'name': 'BreakfastDaily', 'type': 'select',
     'question': 'Do you eat breakfast daily?',
     'options': ['Yes', 'No']},
    {'name': 'ScreenTimeHours', 'type': 'number',
     'question': 'Screen time hours per day?',
     'validation': {'min': 0, 'max': 24}},
    {'name': 'PhysicalActivityHours', 'type': 'number',
     'question': 'Physical activity hours per day?',
     'validation': {'min': 0, 'max': 24}},
    {'name': 'PlaysSport', 'type': 'select',
     'question': 'Do you play sports?',
     'options': ['Yes', 'No']},
    {'name': 'MentalHealthScore', 'type': 'number',
     'question': 'Rate your mental health (1-10).',
     'validation': {'min': 1, 'max': 10}},
    {'name': 'ParentalEducationLevel', 'type': 'select',
     'question': 'Parental education level?',
     'options': ['High school', 'Graduate', 'Postgrad']},
    {'name': 'HouseholdIncomeLevel', 'type': 'select',
     'question': 'Household income level?',
     'options': ['Low', 'Medium', 'High']},
    {'name': 'StudyEnvironmentRating', 'type': 'number',
     'question': 'Rate your study environment (1-5).',
     'validation': {'min': 1, 'max': 5}},
    {'name': 'FriendSupportScore', 'type': 'number',
     'question': 'Friend support score (1-10).',
     'validation': {'min': 1, 'max': 10}},
    {'name': 'ParticipatesInClubs', 'type': 'select',
     'question': 'Do you participate in clubs?',
     'options': ['Yes', 'No']},
    {'name': 'PartTimeWork', 'type': 'select',
     'question': 'Do you do part-time work?',
     'options': ['Yes', 'No']},
]
@app.get("/", response_class=HTMLResponse)
async def chat_ui(request: Request):
    """Serve the chat front-end, passing the field questionnaire to the template."""
    context = {"request": request, "fields": FIELDS}
    return templates.TemplateResponse("chat.html", context)
@app.post("/predict_json")
async def predict_and_advise(payload: dict):
    """Validate the payload against FIELDS, predict the student's final exam
    score with the loaded model, and return Groq-generated coaching advice.

    Raises HTTPException(400) for missing, non-numeric, or out-of-range fields.
    """
    # --- validate, cast, and range-check in one pass over FIELDS ---
    # (The original iterated FIELDS twice: once to cast, once to range-check.)
    data = {}
    for f in FIELDS:
        key = f["name"]
        if key not in payload:
            raise HTTPException(400, f"Missing field: {key}")
        val = payload[key]
        if f["type"] == "number":
            try:
                val = float(val)
            except (TypeError, ValueError):
                # FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.
                raise HTTPException(400, f"{key} must be numeric")
            if "validation" in f:
                mn, mx = f["validation"]["min"], f["validation"]["max"]
                if not (mn <= val <= mx):
                    raise HTTPException(
                        400, f"{key} must be between {mn} and {mx}")
        data[key] = val
    # --- predict score ---
    df = pd.DataFrame([data])
    score = float(model.predict(df)[0])
    data["PredictedScore"] = round(score, 2)
    # --- build Groq chat messages ---
    system_msg = {
        "role": "system",
        "content": (
            "You are an expert academic coach. "
            "Given a student's profile data and their predicted final exam score, "
            "provide a concise performance analysis and actionable improvement suggestions."
        )
    }
    lines = [f"{k}: {v}" for k, v in data.items() if k != "PredictedScore"]
    user_msg = {
        "role": "user",
        "content": (
            "Here is the student data:\n" +
            "\n".join(lines) +
            f"\nPredicted final exam score: {data['PredictedScore']}\n"
            "What targeted advice can you give them to improve their performance?"
        )
    }
    # --- call Groq ---
    resp = groq_client.chat.completions.create(
        model="llama-3.3-70b-versatile",
        messages=[system_msg, user_msg],
        temperature=0.5,
        max_completion_tokens=512,
        top_p=1.0
    )
    advice = resp.choices[0].message.content
    return JSONResponse({
        "predicted": data["PredictedScore"],
        "advice": advice
    })
|