Spaces:
Sleeping
Sleeping
Upload 5 files
Browse files- Dockerfile +12 -0
- chat.py +50 -0
- classifier.py +15 -0
- main.py +75 -0
- requirements.txt +11 -0
Dockerfile
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Slim Python base keeps the image small; 3.10 matches the app's syntax needs.
FROM python:3.10-slim

WORKDIR /app

# Install dependencies first so this layer is cached across code-only changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

# 7860 is the port Hugging Face Spaces expects the app to listen on.
EXPOSE 7860

# Serve the FastAPI app defined in main.py on all interfaces.
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
|
chat.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
import os

# SECURITY: read the OpenRouter key from the environment instead of source.
# The previous revision committed a live key ("sk-or-v1-…") to the repo; that
# key is compromised and must be revoked/rotated on openrouter.ai.
OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY", "")


SYSTEM_PROMPT = """
You are MentorAI, an educational assistant.
Always answer in numbered lists.
One idea per line.
Use simple language for high school students.
Do not repeat points.
"""

# =========================
# AI RESPONSE GENERATOR
# =========================
def generate_response(messages):
    """Send the conversation to OpenRouter and return the assistant's reply.

    Args:
        messages: list of ``{"role": ..., "content": ...}`` dicts holding the
            user/assistant turns; the system prompt is prepended here.

    Returns:
        The model's reply text, or a user-facing "⚠️ ..." string on any
        failure (network error, bad JSON, missing "choices"). This function
        never raises — the /chat endpoint relies on always getting a string.
    """
    try:
        payload = {
            "model": "xiaomi/mimo-v2-flash:free",
            "messages": [
                {"role": "system", "content": SYSTEM_PROMPT},
                *messages
            ]
        }

        response = requests.post(
            "https://openrouter.ai/api/v1/chat/completions",
            headers={
                "Authorization": f"Bearer {OPENROUTER_API_KEY}",
                "Content-Type": "application/json"
            },
            json=payload,
            timeout=30
        )

        data = response.json()

        # API errors come back without a "choices" array.
        if "choices" not in data:
            return "⚠️ AI error. Try again."

        return data["choices"][0]["message"]["content"]

    except Exception as e:
        # Catch-all boundary: surface the failure as a chat message.
        # (Resolved a leftover git merge conflict here — both sides were
        # identical.)
        return f"⚠️ AI service error: {str(e)}"
|
classifier.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from transformers import pipeline

# Lazily-initialized zero-shot classifier. Building the BART-MNLI pipeline
# downloads ~1.6 GB of weights and takes a long time; doing it at import time
# (as before) blocked app startup and made `import classifier` fail offline.
classifier = None

LABELS = [
    "learning question",
    "casual conversation",
    "homework cheating",
    "off topic"
]


def _get_classifier():
    """Create the zero-shot pipeline on first use and cache it module-wide."""
    global classifier
    if classifier is None:
        classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
    return classifier


def is_supported_question(text: str) -> bool:
    """Return True when *text* is most likely a learning-related question.

    Runs zero-shot classification over LABELS; the pipeline returns labels
    sorted by score (highest first), so we only inspect the top label.
    """
    result = _get_classifier()(text, LABELS)
    top_label = result["labels"][0]
    return top_label == "learning question"
|
main.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from chat import generate_response
from classifier import is_supported_question

app = FastAPI(title="MentorAI Backend")

# =========================
# CORS (VERY IMPORTANT)
# =========================
# Wide-open CORS so a browser frontend served from a different origin
# (e.g. the Hugging Face Space UI) can call this API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Hugging Face requires this
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# =========================
# SESSION MEMORY (LIGHT)
# =========================
# In-process chat history keyed by session_id. NOTE(review): entries are
# never evicted across sessions, so memory grows with distinct session ids,
# and state is lost on restart — acceptable for a demo Space.
sessions = {}
# Each session keeps only its most recent N messages (see /chat) to bound
# the prompt sent to the model.
MAX_MESSAGES = 6

class ChatRequest(BaseModel):
    # Opaque client-chosen id used to key the conversation history.
    session_id: str
    # The user's chat message for this turn.
    message: str
|
| 29 |
+
|
| 30 |
+
# =========================
# HEALTH CHECK
# =========================
@app.get("/")
def home():
    """Liveness probe: confirms the backend process is up and serving."""
    status_payload = {"status": "MentorAI backend running"}
    return status_payload
|
| 36 |
+
|
| 37 |
+
# =========================
# CHAT ENDPOINT
# =========================
def _dedupe_lines(text):
    """Collapse *text* to its unique non-empty lines, first-seen order kept.

    The system prompt asks the model for one idea per line with no repeats;
    this strips any duplicate lines it emits anyway.
    """
    seen = set()
    clean_lines = []
    for line in text.split("\n"):
        line = line.strip()
        if line and line not in seen:
            seen.add(line)
            clean_lines.append(line)
    return "\n".join(clean_lines)


@app.post("/chat")
def chat(req: ChatRequest):
    """Handle one chat turn: gate off-topic input, query the model, store history.

    Always returns a ``{"response": str}`` payload, including on errors
    (generate_response converts failures to "⚠️ ..." strings).
    """
    message = req.message.strip()

    # Robustness: reject empty/whitespace-only input before spending a
    # classifier or model call on it (previously "" went straight through).
    if not message:
        return {"response": "⚠️ Please type a question."}

    if not is_supported_question(message):
        return {
            "response": "⚠️ MentorAI only answers learning-related questions."
        }

    history = sessions.setdefault(req.session_id, [])
    history.append({
        "role": "user",
        "content": message
    })

    # Bound the prompt: keep only the most recent turns for this session.
    sessions[req.session_id] = history[-MAX_MESSAGES:]

    ai_reply = generate_response(sessions[req.session_id])

    final_reply = _dedupe_lines(ai_reply)

    sessions[req.session_id].append({
        "role": "assistant",
        "content": final_reply
    })

    return {"response": final_reply}
|
requirements.txt
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi
uvicorn
requests
transformers
torch
|