| | from fastapi import FastAPI |
| | from fastapi.middleware.cors import CORSMiddleware |
| | from pydantic import BaseModel |
| | from transformers import AutoTokenizer |
| | import onnxruntime as ort |
| | import numpy as np |
| | from pathlib import Path |
| | import traceback |
| |
|
| | |
# FastAPI application instance; title/version appear in the auto-generated docs.
app = FastAPI(title="Portfolio Chatbot API", version="1.0")
| |
|
| | |
# Allow cross-origin requests from any host so the separately hosted
# portfolio frontend can call this API.
# NOTE(review): browsers reject `allow_origins=["*"]` combined with
# `allow_credentials=True` for credentialed requests — confirm whether
# credentials are actually needed; if not, drop `allow_credentials`.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
| |
|
| | |
# Model artifacts live in the ./models directory next to this file.
BASE_DIR = Path(__file__).resolve().parent
_MODELS_DIR = BASE_DIR / "models"
MODEL_PATH = _MODELS_DIR / "bert_chatbot.onnx"
TOKENIZER_PATH = _MODELS_DIR / "bert-base-multilingual-cased"
| |
|
| | |
# Module-level globals populated by load_model() at import time.
tokenizer = None  # transformers tokenizer, or None if loading failed
session = None  # onnxruntime.InferenceSession, or None if loading failed
| |
|
| | |
def load_model():
    """Load the tokenizer and ONNX inference session into module globals.

    Populates the module-level ``tokenizer`` and ``session`` used by the
    ``/chatbot`` endpoint. Any failure is logged with a traceback instead of
    being raised, so the API can still start and report a friendly error at
    request time (the endpoint checks for ``None`` globals).
    """
    global tokenizer, session
    try:
        print("Loading tokenizer and ONNX model...")
        tokenizer = AutoTokenizer.from_pretrained(str(TOKENIZER_PATH))
        # CPU-only provider keeps the service deployable without a GPU.
        session = ort.InferenceSession(
            str(MODEL_PATH), providers=["CPUExecutionProvider"]
        )
        print("Model and tokenizer loaded successfully!")
        print("Model expects inputs:", [i.name for i in session.get_inputs()])
    except Exception as e:
        # Log but do not re-raise: importing this module must never crash.
        print("ERROR while loading model/tokenizer:", e)
        traceback.print_exc()
| |
|
# Eagerly load model artifacts at import/startup so the first request is fast.
load_model()
| |
|
| | |
# Label order must match the classifier head used when the ONNX model was
# exported; index i maps to the intent name at position i.
_INTENT_LABELS = (
    "about_me",
    "career_goal",
    "experience",
    "fallback",
    "greeting",
    "projects",
    "skills",
)
id2label = dict(enumerate(_INTENT_LABELS))
| |
|
| | |
# Canned reply for each intent label; "fallback" doubles as the default
# answer when classification fails or the intent is unknown.
responses = dict(
    about_me="I am a passionate developer specializing in AI and web development.",
    skills="My main skills are HTML5, CSS3, JavaScript, Laravel, Node.js, Database, TensorFlow, PyTorch, Firebase, and Jupyter Notebook.",
    projects="Some of my projects are Mobile Apps Bald Detection and Jupyter Notebook Bald Detection.",
    experience="I have worked as IT Support, AI Engineer, and Freelancer on multiple projects.",
    career_goal="My career goal is to become a Full Stack Developer and Machine Learning Engineer.",
    greeting="Hello! How can I help you regarding this portfolio?",
    fallback="I'm sorry, I don't understand. Please ask another question.",
)
| |
|
| | |
class ChatRequest(BaseModel):
    """Request body for POST /chatbot."""

    # Raw user message to classify into an intent.
    text: str
| |
|
| | |
@app.get("/")
async def root():
    """Health-check endpooint: confirms the API process is up and serving."""
    # Fix: the original message began with a mojibake character ("π ").
    return {"message": "Portfolio Chatbot API is running successfully!"}
| |
|
| | |
@app.post("/chatbot")
async def chatbot(req: ChatRequest):
    """Classify the user's message into an intent and return a canned reply.

    Returns a JSON object ``{"reply": str, "intent": str}``. Degrades
    gracefully: if the model failed to load, or inference raises, the
    fallback/error reply is returned instead of an unhandled 500.
    """
    intent = "fallback"

    # Model or tokenizer failed to load at startup — answer with the
    # fallback reply and a distinguishable intent instead of crashing.
    if session is None or tokenizer is None:
        return {"reply": responses["fallback"], "intent": "error_loading"}

    try:
        # Tokenize to NumPy tensors; 128 tokens is ample for short chat
        # messages and keeps CPU inference cheap.
        inputs = tokenizer(
            req.text,
            return_tensors="np",
            padding=True,
            truncation=True,
            max_length=128,
        )

        # Only feed tensors the exported graph actually declares (some
        # exports drop e.g. token_type_ids); ONNX Runtime rejects extras.
        # Set (not list) membership for O(1) lookups.
        expected_inputs = {i.name for i in session.get_inputs()}
        ort_inputs = {
            name: tensor.astype(np.int64)
            for name, tensor in inputs.items()
            if name in expected_inputs
        }

        # First output is assumed to be the classification logits,
        # shaped (batch, num_labels).
        ort_outputs = session.run(None, ort_inputs)
        logits = ort_outputs[0]

        # Argmax over the label axis of the single-item batch.
        pred_id = int(np.argmax(logits, axis=1)[0])
        intent = id2label.get(pred_id, "fallback")

        reply = responses.get(intent, responses["fallback"])

        print(f"Input: {req.text} | Intent: {intent} | Reply: {reply}")

        return {"reply": reply, "intent": intent}

    except Exception as e:
        # Boundary handler: log the full traceback, return a safe JSON
        # payload (still HTTP 200 so the frontend handles it uniformly).
        print("Runtime error:", e)
        traceback.print_exc()
        return {"reply": "Internal server error.", "intent": intent}
| |
|