Update backend/api.py
backend/api.py CHANGED (+16 −22)
@@ -1,12 +1,12 @@
+import pandas as pd
 from fastapi import FastAPI
 from fastapi.middleware.cors import CORSMiddleware
 from pydantic import BaseModel
 from transformers import AutoTokenizer
 import onnxruntime as ort
-import numpy as np
-import pandas as pd
+import numpy as np
 from pathlib import Path
-import traceback
+import traceback
 
 app = FastAPI()
 
@@ -22,29 +22,26 @@ app.add_middleware(
 # === Path setup ===
 BASE_DIR = Path(__file__).resolve().parent
 
-#
-# If your file is named bert_chatbot.onnx and lives in models/
+# PATH FIX: make 'bert_chatbot.onnx' match the file name in the 'models' folder
 MODEL_PATH = BASE_DIR / "models" / "bert_chatbot.onnx"
 TOKENIZER_PATH = BASE_DIR / "models" / "bert-base-multilingual-cased"
-DATASET_PATH = BASE_DIR / "dataset_chatbot_template.xlsx"
+DATASET_PATH = BASE_DIR / "dataset_chatbot_template.xlsx"  # Only used to inspect the intents
 
 # === Global Variables ===
 tokenizer = None
 session = None
-df_jawaban
+# df_jawaban is not needed for response lookup, but we load it to prevent errors.
+df_jawaban = None
 
 # === Load tokenizer and model ===
 try:
     print("🚀 Loading ONNX model...")
 
-    # 1. Load the tokenizer
     tokenizer = AutoTokenizer.from_pretrained(str(TOKENIZER_PATH))
-
-    # 2. Load the ONNX Runtime session (the CPU provider is the most stable on the HF free tier)
     session = ort.InferenceSession(str(MODEL_PATH), providers=["CPUExecutionProvider"])
 
-    #
-    df_jawaban = pd.read_excel(DATASET_PATH)
+    # Load the DataFrame: only to confirm the file exists, not for response lookup
+    df_jawaban = pd.read_excel(DATASET_PATH)
 
     print("✅ ONNX model loaded!")
 
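Before wiring the endpoint, it is worth sanity-checking what the exported graph actually declares. A minimal sketch using the standard onnxruntime API, assuming the `session` loaded above:

# BERT exports typically declare "input_ids" and "attention_mask",
# sometimes "token_type_ids" as well.
for node in session.get_inputs():
    print(node.name, node.shape, node.type)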
@@ -57,7 +54,7 @@ except Exception as e:
     pass
 
 
-# === Default responses ===
+# === Default responses (used as the source of answers) ===
 responses = {
     "about_me": "I am a passionate developer specializing in AI and web development.",
     "skills": "My main skills are HTML5, CSS3, JavaScript, Laravel, Node.js, TensorFlow, and PyTorch.",
@@ -84,7 +81,6 @@ async def chatbot(req: ChatRequest):
 
     try:
         # 1. Tokenization (return_tensors="np" because we are using NumPy/ONNX)
-        # return_tensors="np" saves a PyTorch conversion
         inputs = tokenizer(req.text, return_tensors="np", padding=True, truncation=True, max_length=128)
 
         # 2. Get the input names from the ONNX session
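The diff context omits the lines between comment 2 and the `logits` line in the next hunk, where the feed dict is built and the session is run. A hedged sketch of that step, assuming the tokenizer's output keys match the graph's input names:

# Feed only the tensors the graph declares, then run the session.
# BERT ONNX exports usually expect int64, so cast defensively.
input_names = {i.name for i in session.get_inputs()}
ort_inputs = {k: np.asarray(v, dtype=np.int64) for k, v in inputs.items() if k in input_names}
ort_outputs = session.run(None, ort_inputs)  # None = fetch all outputs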
@@ -100,24 +96,22 @@ async def chatbot(req: ChatRequest):
         logits = ort_outputs[0]
         pred_id = np.argmax(logits, axis=1)[0]
 
-        # === Map IDs to labels
+        # === Map IDs to labels ===
         id2label = {
             0: "about_me", 1: "skills", 2: "projects", 3: "experience",
             4: "career_goal", 5: "greeting",
         }
         intent = id2label.get(pred_id, "fallback")
 
-        # === Fetch the answer ===
-
-
-
-        else:
-            reply = responses.get(intent, responses["fallback"])
-
+        # === Take the answer from the responses dictionary (not the DataFrame) ===
+        # Because the DataFrame does not contain an answer column
+        reply = responses.get(intent, responses["fallback"])
+
         return {"reply": str(reply), "intent": intent}
 
     except Exception as e:
         import traceback
         print(f"❌ Runtime error: {e}")
         traceback.print_exc()
+        # Return a valid error payload so the frontend knows something went wrong
         return {"reply": "⚠️ Internal server error.", "intent": "error"}
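With this change the handler always answers from the `responses` dictionary, so the endpoint can be smoke-tested without the Excel file mattering. A sketch using FastAPI's TestClient; the POST path "/chat" is an assumption, since the route decorator sits outside the hunks shown, and `ChatRequest` is only known to expose a `text` field:

from fastapi.testclient import TestClient
from backend.api import app

client = TestClient(app)
resp = client.post("/chat", json={"text": "What are your skills?"})
print(resp.status_code, resp.json())  # expect intent "skills" with the canned reply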