Update backend/api.py
Browse files- backend/api.py +37 -26
backend/api.py
CHANGED
|
@@ -1,45 +1,42 @@
|
|
| 1 |
from fastapi import FastAPI
|
| 2 |
from fastapi.middleware.cors import CORSMiddleware
|
| 3 |
from pydantic import BaseModel
|
| 4 |
-
from transformers import AutoTokenizer
|
|
|
|
| 5 |
import torch
|
| 6 |
import pandas as pd
|
| 7 |
from pathlib import Path
|
| 8 |
|
| 9 |
app = FastAPI()
|
| 10 |
|
| 11 |
-
# === CORS
|
| 12 |
app.add_middleware(
|
| 13 |
CORSMiddleware,
|
| 14 |
-
allow_origins=["*"],
|
| 15 |
allow_credentials=True,
|
| 16 |
allow_methods=["*"],
|
| 17 |
allow_headers=["*"],
|
| 18 |
)
|
| 19 |
|
| 20 |
-
# ===
|
| 21 |
BASE_DIR = Path(__file__).resolve().parent
|
| 22 |
-
|
|
|
|
| 23 |
DATASET_PATH = BASE_DIR / "dataset_chatbot_template.xlsx"
|
| 24 |
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
model.to(device)
|
| 31 |
-
model.eval()
|
| 32 |
-
print("β
Model loaded successfully!")
|
| 33 |
-
except Exception as e:
|
| 34 |
-
print(f"β Model load error: {e}")
|
| 35 |
|
| 36 |
-
# === Load
|
| 37 |
try:
|
| 38 |
df_jawaban = pd.read_excel(DATASET_PATH)
|
| 39 |
except Exception:
|
| 40 |
df_jawaban = pd.DataFrame(columns=["Intent", "Jawaban_ID"])
|
| 41 |
|
| 42 |
-
# === Default
|
| 43 |
responses = {
|
| 44 |
"about_me": "I am a passionate developer specializing in AI and web development.",
|
| 45 |
"skills": "My main skills are HTML5, CSS3, JavaScript, Laravel, Node.js, TensorFlow, and PyTorch.",
|
|
@@ -50,31 +47,45 @@ responses = {
|
|
| 50 |
"fallback": "I'm sorry, I don't understand. Please try another question."
|
| 51 |
}
|
| 52 |
|
|
|
|
| 53 |
class ChatRequest(BaseModel):
|
| 54 |
text: str
|
| 55 |
|
| 56 |
@app.get("/")
|
| 57 |
async def root():
|
| 58 |
-
return {"message": "π Chatbot API running on Hugging Face"}
|
| 59 |
|
| 60 |
@app.post("/chatbot")
|
| 61 |
-
async def
|
| 62 |
try:
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 67 |
|
| 68 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 69 |
|
|
|
|
| 70 |
if not df_jawaban.empty and intent in df_jawaban["Intent"].values:
|
| 71 |
reply = df_jawaban.loc[df_jawaban["Intent"] == intent, "Jawaban_ID"].iloc[0]
|
| 72 |
else:
|
| 73 |
reply = responses.get(intent, responses["fallback"])
|
| 74 |
|
| 75 |
-
# β
agar cocok dengan frontend
|
| 76 |
return {"reply": reply, "intent": intent}
|
| 77 |
|
| 78 |
except Exception as e:
|
| 79 |
-
print(f"β Runtime
|
| 80 |
return {"reply": "β οΈ Internal server error.", "intent": "error"}
|
|
|
|
| 1 |
from fastapi import FastAPI
|
| 2 |
from fastapi.middleware.cors import CORSMiddleware
|
| 3 |
from pydantic import BaseModel
|
| 4 |
+
from transformers import AutoTokenizer
|
| 5 |
+
import onnxruntime as ort
|
| 6 |
import torch
|
| 7 |
import pandas as pd
|
| 8 |
from pathlib import Path
|
| 9 |
|
| 10 |
app = FastAPI()

# === CORS for the frontend deployed on Vercel ===
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# problematic — the CORS spec forbids a wildcard origin when credentials are
# allowed, and Starlette's CORSMiddleware handles this combination by echoing
# the request origin, which effectively allows every site to send credentialed
# requests. Confirm the real frontend origin(s) and list them explicitly.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
|
| 20 |
|
| 21 |
+
# === Path setup ===
# All artifacts are expected to sit next to this file (repo-relative, so the
# app works regardless of the process working directory).
BASE_DIR = Path(__file__).resolve().parent
MODEL_PATH = BASE_DIR / "bert_chatbot_model.onnx"          # exported BERT intent classifier
TOKENIZER_PATH = BASE_DIR / "bert_chatbot_tokenizer"       # saved HF tokenizer directory
DATASET_PATH = BASE_DIR / "dataset_chatbot_template.xlsx"  # optional intent -> answer sheet

# === Load tokenizer and model ===
# NOTE(review): this load is unguarded — a missing/corrupt model file crashes
# the app at import time. An earlier revision wrapped model loading in
# try/except; confirm whether fail-fast is the intended behavior here.
print("π Loading ONNX model...")
tokenizer = AutoTokenizer.from_pretrained(str(TOKENIZER_PATH))
# CPU-only provider: Hugging Face Spaces free tier has no GPU.
session = ort.InferenceSession(str(MODEL_PATH), providers=["CPUExecutionProvider"])
print("β ONNX model loaded!")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
|
| 33 |
+
# === Load dataset (optional) ===
# The Excel sheet maps an intent name ("Intent") to its reply ("Jawaban_ID").
# Loading is best-effort by design: when the sheet is missing or unreadable
# the API still boots and replies come from the hard-coded `responses`
# defaults instead. We keep the broad catch (read_excel can raise several
# error types) but report WHY the sheet was skipped rather than swallowing
# the error silently, which previously made misconfiguration invisible.
try:
    df_jawaban = pd.read_excel(DATASET_PATH)
except Exception as e:
    print(f"Dataset load skipped ({DATASET_PATH.name}: {e}); using default responses only.")
    df_jawaban = pd.DataFrame(columns=["Intent", "Jawaban_ID"])
|
| 38 |
|
| 39 |
+
# === Default responses ===
|
| 40 |
responses = {
|
| 41 |
"about_me": "I am a passionate developer specializing in AI and web development.",
|
| 42 |
"skills": "My main skills are HTML5, CSS3, JavaScript, Laravel, Node.js, TensorFlow, and PyTorch.",
|
|
|
|
| 47 |
"fallback": "I'm sorry, I don't understand. Please try another question."
|
| 48 |
}
|
| 49 |
|
| 50 |
+
# === Request schema ===
class ChatRequest(BaseModel):
    """Body of POST /chatbot: the raw user message to classify."""

    text: str  # user utterance; tokenized and fed to the ONNX intent model
|
| 53 |
|
| 54 |
@app.get("/")
|
| 55 |
async def root():
|
| 56 |
+
return {"message": "π ONNX Chatbot API running on Hugging Face"}
|
| 57 |
|
| 58 |
@app.post("/chatbot")
|
| 59 |
+
async def chatbot(req: ChatRequest):
|
| 60 |
try:
|
| 61 |
+
# Tokenize input
|
| 62 |
+
inputs = tokenizer(req.text, return_tensors="pt", padding=True, truncation=True, max_length=128)
|
| 63 |
+
|
| 64 |
+
# Convert to numpy for ONNX
|
| 65 |
+
ort_inputs = {k: v.cpu().numpy() for k, v in inputs.items()}
|
| 66 |
+
ort_outputs = session.run(None, ort_inputs)
|
| 67 |
+
logits = torch.tensor(ort_outputs[0])
|
| 68 |
+
pred_id = torch.argmax(logits, dim=1).item()
|
| 69 |
|
| 70 |
+
# === Mapping ID ke label ===
|
| 71 |
+
id2label = {
|
| 72 |
+
0: "about_me",
|
| 73 |
+
1: "skills",
|
| 74 |
+
2: "projects",
|
| 75 |
+
3: "experience",
|
| 76 |
+
4: "career_goal",
|
| 77 |
+
5: "greeting",
|
| 78 |
+
}
|
| 79 |
+
intent = id2label.get(pred_id, "fallback")
|
| 80 |
|
| 81 |
+
# === Ambil jawaban ===
|
| 82 |
if not df_jawaban.empty and intent in df_jawaban["Intent"].values:
|
| 83 |
reply = df_jawaban.loc[df_jawaban["Intent"] == intent, "Jawaban_ID"].iloc[0]
|
| 84 |
else:
|
| 85 |
reply = responses.get(intent, responses["fallback"])
|
| 86 |
|
|
|
|
| 87 |
return {"reply": reply, "intent": intent}
|
| 88 |
|
| 89 |
except Exception as e:
|
| 90 |
+
print(f"β Runtime error: {e}")
|
| 91 |
return {"reply": "β οΈ Internal server error.", "intent": "error"}
|