from fastapi import FastAPI, HTTPException, Body
from app import schemas
from app.services.llm_engine import llm_engine
from app.services.skill_manager import skill_manager
import pandas as pd
import pickle
import ast
import os
import re
from sklearn.metrics.pairwise import linear_kernel
from app.services.psych_service import psych_service
from typing import List

app = FastAPI(title="MORA - Mentori AI Assistant")

# --- GLOBAL MODELS STORE ---
# Populated at startup by load_models(); entries stay None if loading fails.
models = {
    'df': None,
    'tfidf': None,
    'matrix': None
}

# Flat list of skill keywords loaded from the CSV dataset at startup.
SKILL_KEYWORDS = []


@app.on_event("startup")
def load_skill_keywords():
    """Load the skill-keyword dataset (CSV) into the global SKILL_KEYWORDS list.

    Best-effort: on any failure the app still boots with an empty keyword
    list (keyword detection then simply finds nothing).
    """
    global SKILL_KEYWORDS
    try:
        # Absolute path so this works regardless of the current working dir.
        current_dir = os.path.dirname(os.path.abspath(__file__))
        csv_path = os.path.join(current_dir, "data", "Skill Keywords.csv")
        df = pd.read_csv(csv_path)
        SKILL_KEYWORDS = df['keyword'].dropna().tolist()
        print(f"✅ Berhasil memuat {len(SKILL_KEYWORDS)} keywords skill.")
    except Exception as e:
        print(f"⚠️ Gagal memuat dataset keyword: {e}")
        SKILL_KEYWORDS = []


def find_keywords_in_text(user_text: str):
    """Return the de-duplicated skill keywords that occur in *user_text*.

    Short keywords (< 3 chars, e.g. "C", "R", "Go") are matched as whole
    words so they do not fire inside longer words like "Car" or "Goat".
    Longer keywords use a plain case-insensitive substring check.

    Note: the returned list has no guaranteed order (built via set()).
    """
    found = []
    text_lower = user_text.lower()
    for k in SKILL_KEYWORDS:
        if len(k) < 3:
            # FIX: the old check required a literal space on both sides of a
            # short keyword, so "I know C, and R" or a keyword at the start/
            # end of the message was missed. Lookarounds require a non-word
            # character (or string edge) on each side instead, which also
            # keeps symbols like "C#" matchable.
            if re.search(rf"(?<!\w){re.escape(k.lower())}(?!\w)", text_lower):
                found.append(k)
        else:
            if k.lower() in text_lower:
                found.append(k)
    # Drop duplicates before returning.
    return list(set(found))
# --- 1. STARTUP: LOAD MODEL .PKL ---
@app.on_event("startup")
def load_models():
    """Load the pre-trained recommendation artifacts (pickles) into `models`.

    Best-effort: on failure the entries in `models` remain None and a hint
    about the expected artifacts directory is printed.
    """
    print("🔄 Loading Pre-trained Models...")
    # Absolute paths so the app can be launched from any working directory.
    current_dir = os.path.dirname(os.path.abspath(__file__))
    base_dir = os.path.dirname(current_dir)
    artifacts_dir = os.path.join(base_dir, "model_artifacts")
    try:
        with open(os.path.join(artifacts_dir, 'courses_df.pkl'), 'rb') as f:
            models['df'] = pickle.load(f)
        with open(os.path.join(artifacts_dir, 'tfidf_vectorizer.pkl'), 'rb') as f:
            models['tfidf'] = pickle.load(f)
        with open(os.path.join(artifacts_dir, 'tfidf_matrix.pkl'), 'rb') as f:
            models['matrix'] = pickle.load(f)
        print(f"✅ Models Loaded Successfully from: {artifacts_dir}")
    except Exception as e:
        print(f"❌ Error Loading Models: {e}")
        print(f"👉 Pastikan folder 'model_artifacts' ada di: {base_dir}")


# --- 2. ENDPOINT REKOMENDASI (ML POWERED) ---
@app.post("/recommendations", response_model=List[schemas.RecommendationItem], tags=["Recommendation"])
async def generate_recommendations(req: schemas.UserProfile):
    """Generate a learning-path recommendation list for the given profile."""
    # Pydantic v2 uses model_dump(); fall back to v1 dict().
    user_data = req.model_dump() if hasattr(req, 'model_dump') else req.dict()
    # The service already unwraps its payload and returns a plain list.
    result_list = await llm_engine.generate_curriculum_stateless(user_data)
    return result_list


# --- 3. ENDPOINT CHAT ROUTER ---
@app.post("/chat/process", response_model=schemas.ChatResponse, tags=["Main Router"])
async def process_chat(req: schemas.ChatRequest):
    """Route a chat message: detect intent, then dispatch to exam generation,
    psych test, recommendation, progress report, or casual chat."""
    available_skill_names = []
    role_data = None

    # Safety check: only look up role data when a non-blank role is supplied.
    if req.role and req.role.strip() != "":
        role_data = skill_manager.get_role_data(req.role)
        if role_data:
            available_skill_names = [s['name'] for s in role_data['sub_skills']]

    # FIX: this keyword-search block was duplicated verbatim (computed twice
    # in a row); it is now computed exactly once.
    found_keywords = find_keywords_in_text(req.message)
    if found_keywords:
        # e.g. "User bertanya tentang: Python, SQL"
        keyword_context = ", ".join(found_keywords)
        dataset_status = "FOUND"
    else:
        keyword_context = "NONE"
        dataset_status = "NOT_FOUND"

    # Convert history to plain dicts so the LLM layer never sees Pydantic
    # models (Pydantic v2 model_dump() with a v1 dict() fallback).
    history_dicts = [m.model_dump() if hasattr(m, 'model_dump') else m.dict() for m in req.history]

    # Ask the router LLM for the user's intent, with full context.
    intent = await llm_engine.process_user_intent(
        user_text=req.message,
        available_skills=available_skill_names,
        user_role=req.role,
        history=history_dicts  # lets the AI keep conversational context
    )
    action = intent.get('action', 'CASUAL_CHAT')
    detected_skills_list = intent.get('detected_skills', [])

    # Role-gated actions: without a role, downgrade to casual chat.
    user_role_is_empty = not req.role or req.role.strip() == ""
    restricted_actions = ["START_EXAM", "GET_RECOMMENDATION", "CHECK_PROGRESS"]
    if action in restricted_actions and user_role_is_empty:
        print(f"DEBUG: Role Kosong mencoba {action} -> BELOKKAN KE CASUAL_CHAT")
        action = "CASUAL_CHAT"

    final_reply = ""
    response_data = None

    if action == "START_EXAM":
        target_skill_ids = []
        # A. Collect the IDs of ALL detected skills (fuzzy name match).
        if detected_skills_list and role_data:
            for ds in detected_skills_list:
                for s in role_data['sub_skills']:
                    # Name-similarity check in either direction.
                    if s['name'].lower() in ds.lower() or ds.lower() in s['name'].lower():
                        if s['id'] not in target_skill_ids:
                            target_skill_ids.append(s['id'])

        # B. If any skill is valid, generate a question for EACH skill.
        if target_skill_ids:
            exam_list = []
            for skid in target_skill_ids:
                # Current level for this skill (defaults to beginner).
                user_current_level = req.current_skills.get(skid, "beginner")
                skill_details = skill_manager.get_skill_details(req.role, skid)
                level_data = skill_details['levels'].get(
                    user_current_level, skill_details['levels']['beginner']
                )
                # Generate the question (sequentially, one skill at a time).
                llm_res = await llm_engine.generate_question(
                    level_data['exam_topics'], user_current_level
                )
                exam_list.append({
                    "skill_id": skid,
                    "skill_name": skill_details['name'],
                    "level": user_current_level,
                    "question": llm_res['question_text'],
                    "context": llm_res['grading_rubric']
                })

            # C. Multi-exam response payload (mode flags it for the frontend).
            response_data = {
                "mode": "multiple_exams",
                "exams": exam_list
            }
            skill_display = ", ".join([x['skill_name'] for x in exam_list])
            final_reply = f"Siap! Saya siapkan {len(exam_list)} ujian untukmu: **{skill_display}**. Silakan kerjakan satu per satu di bawah ini! 👇"
        else:
            action = "CASUAL_CHAT"
            # FIX: reuse history_dicts (was a second [m.dict() ...] pass using
            # the deprecated Pydantic v1 API).
            # NOTE(review): this positional call passes keyword_context and
            # dataset_status, while the CASUAL_CHAT branch below calls
            # casual_chat with is_role_empty instead — confirm
            # llm_engine.casual_chat's signature accepts both forms.
            final_reply = await llm_engine.casual_chat(
                req.message,
                history_dicts,
                keyword_context,
                dataset_status
            )

    elif action == "START_PSYCH_TEST":
        response_data = {"trigger_psych_test": True}
        final_reply = "Tenang, Mora punya tes kepribadian singkat untuk membantumu memilih job role antara **AI Engineer** atau **Front-End Developer**. Yuk coba sekarang! 👇"

    elif action == "GET_RECOMMENDATION":
        response_data = {"trigger_recommendation": True}
        final_reply = "Sedang menganalisis kebutuhan belajarmu..."

    elif action == "CHECK_PROGRESS":
        response_data = {"trigger_progress_report": True}
        final_reply = "Siap! Berikut adalah ringkasan progress belajar kamu sejauh ini. Silakan dicek di dashboard ya! 📊🚀"

    elif action == "CASUAL_CHAT":
        print(f"DEBUG ROLE STATUS: '{req.role}' -> is_empty={user_role_is_empty}")
        # FIX: history_dicts was recomputed here; the list built above is
        # identical, so reuse it.
        final_reply = await llm_engine.casual_chat(
            user_text=req.message,
            history=history_dicts,
            is_role_empty=user_role_is_empty
        )

    return schemas.ChatResponse(
        reply=final_reply,
        action_type=action,
        data=response_data
    )


@app.post("/exam/submit", response_model=schemas.EvaluationResponse, tags=["Test Sub Skill"])
async def submit_exam(sub: schemas.AnswerSubmission):
    """Grade a submitted exam answer via the LLM and decide pass/fail."""
    evaluation = await llm_engine.evaluate_answer(
        user_answer=sub.user_answer,
        question_context={
            "question_text": "REFER TO CONTEXT",
            "grading_rubric": sub.question_context
        }
    )
    # Pass requires both a correct verdict AND a score of at least 70.
    is_passed = evaluation['is_correct'] and evaluation['score'] >= 70
    # Simple promotion rule: passing suggests moving up to intermediate.
    suggested_lvl = "intermediate" if is_passed else None
    return schemas.EvaluationResponse(
        is_correct=evaluation['is_correct'],
        score=evaluation['score'],
        feedback=evaluation['feedback'],
        passed=is_passed,
        suggested_new_level=suggested_lvl
    )
# --- 5. ENDPOINT PROGRESS ---
@app.post("/progress/analyze", tags=["Track Progress"])
async def get_progress_analysis(data: schemas.ProgressData):
    """Ask the LLM for a natural-language analysis of the user's progress."""
    # FIX: use Pydantic v2 model_dump() when available (consistent with the
    # other endpoints in this file); fall back to the v1 dict() API.
    progress_dict = data.model_dump() if hasattr(data, 'model_dump') else data.dict()
    analysis_text = await llm_engine.analyze_progress(
        user_name=data.user_name,
        progress_data=progress_dict
    )
    return {"analysis": analysis_text}


# ==========================================
# PSYCHOLOGY ENDPOINTS (JOB ROLE TEST)
# ==========================================
@app.get("/psych/questions", response_model=List[schemas.PsychQuestionItem], tags=["Test Job Role"])
def get_psych_questions():
    """Return the list of personality-test questions."""
    return psych_service.get_all_questions()


@app.post("/psych/submit", response_model=schemas.PsychResultResponse, tags=["Test Job Role"])
async def submit_psych_test(req: schemas.PsychSubmitRequest):
    """Score the user's answers deterministically, then ask the LLM for an
    analysis of the winning role."""
    # 1. Mathematical scoring (no LLM involved).
    result = psych_service.calculate_result(req.answers)
    winner = result["winner"]
    scores = result["scores"]
    traits = result["traits"]
    # 2. LLM-written narrative for the suggested role.
    analysis_text = await llm_engine.analyze_psych_result(winner, traits)
    return schemas.PsychResultResponse(
        suggested_role=winner,
        analysis=analysis_text,
        scores=scores
    )